diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f559f704d5..eea64ff6d8 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -4,7 +4,7 @@ steps: provider: "gcp" env: TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.16.0-SNAPSHOT + STACK_VERSION: 9.0.0-SNAPSHOT WORKSPACE: /tmp/go-elasticsearch matrix: setup: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8bc2695370..29a2be6403 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,15 +7,13 @@ on: branches: - github-actions - main - - "8.12" - - "8.13" - - "7.17" + - "8.*" + - "9.*" pull_request: branches: - - "main" - - "8.12" - - "8.13" - - "7.17" + - main + - "8.*" + - "9.*" env: GITHUB_ACTIONS: true diff --git a/.github/workflows/test-api.yml b/.github/workflows/test-api.yml index 92aa7bbef6..c82317a36a 100644 --- a/.github/workflows/test-api.yml +++ b/.github/workflows/test-api.yml @@ -11,7 +11,7 @@ jobs: test-free: name: Free env: - ELASTICSEARCH_VERSION: elasticsearch:8.16.0-SNAPSHOT + ELASTICSEARCH_VERSION: elasticsearch:9.0.0-SNAPSHOT ELASTICSEARCH_URL: http://localhost:9200 runs-on: ubuntu-latest steps: @@ -43,7 +43,7 @@ jobs: test-platinum: name: Platinum env: - ELASTICSEARCH_VERSION: elasticsearch:8.16.0-SNAPSHOT + ELASTICSEARCH_VERSION: elasticsearch:9.0.0-SNAPSHOT ELASTICSEARCH_URL: https://elastic:elastic@localhost:9200 runs-on: ubuntu-latest steps: diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index 81e7be2925..36ecaff9ed 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -7,15 +7,13 @@ on: branches: - github-actions - main - - "8.12" - - "8.13" - - "7.17" + - "8.*" + - "9.*" pull_request: branches: - main - - "8.12" - - "8.13" - - "7.17" + - "8.*" + - "9.*" env: GITHUB_ACTIONS: true diff --git a/.github/workflows/test-unit.yml b/.github/workflows/test-unit.yml index 82dd95f121..472a9ba138 100644 --- 
a/.github/workflows/test-unit.yml +++ b/.github/workflows/test-unit.yml @@ -7,15 +7,13 @@ on: branches: - github-actions - main - - "8.12" - - "8.13" - - "7.17" + - "8.*" + - "9.*" pull_request: branches: - main - - "8.12" - - "8.13" - - "7.17" + - "8.*" + - "9.*" env: GITHUB_ACTIONS: true diff --git a/Makefile b/Makefile index 283682fbcb..e205867abe 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/bash -ELASTICSEARCH_DEFAULT_BUILD_VERSION = "8.16.0-SNAPSHOT" +ELASTICSEARCH_DEFAULT_BUILD_VERSION = "9.0.0-SNAPSHOT" ##@ Test test-unit: ## Run unit tests diff --git a/_benchmarks/benchmarks/go.mod b/_benchmarks/benchmarks/go.mod index d87265c368..352f2487d9 100644 --- a/_benchmarks/benchmarks/go.mod +++ b/_benchmarks/benchmarks/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../../ require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20200408073057-6f36a473b19f github.com/fatih/color v1.7.0 github.com/montanaflynn/stats v0.6.3 diff --git a/_examples/bulk/kafka/go.mod b/_examples/bulk/kafka/go.mod index ecc89764a2..d4e9494411 100644 --- a/_examples/bulk/kafka/go.mod +++ b/_examples/bulk/kafka/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/bulk/kafka go 1.22 + toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../../.. 
@@ -13,7 +14,7 @@ require ( require ( github.com/armon/go-radix v1.0.0 // indirect - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/elastic/go-licenser v0.3.1 // indirect github.com/elastic/go-sysinfo v1.1.1 // indirect github.com/elastic/go-windows v1.0.0 // indirect diff --git a/_examples/bulk/kafka/go.sum b/_examples/bulk/kafka/go.sum index f1be7e8b0d..0b5f141175 100644 --- a/_examples/bulk/kafka/go.sum +++ b/_examples/bulk/kafka/go.sum @@ -7,6 +7,7 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= diff --git a/_examples/cloudfunction/go.mod b/_examples/cloudfunction/go.mod index afe8b83b38..279db98966 100644 --- a/_examples/cloudfunction/go.mod +++ b/_examples/cloudfunction/go.mod @@ -9,7 +9,7 @@ replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require github.com/elastic/go-elasticsearch/v8 v8.0.0-20210817150010-57d659deaca7 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/encoding/go.mod b/_examples/encoding/go.mod index 9d49969481..e819475cda 100644 --- a/_examples/encoding/go.mod +++ b/_examples/encoding/go.mod @@ -14,7 +14,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/_examples/extension/go.mod b/_examples/extension/go.mod index a709f366f8..120abf10af 100644 --- a/_examples/extension/go.mod +++ b/_examples/extension/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.13.1 ) diff --git a/_examples/fasthttp/go.mod b/_examples/fasthttp/go.mod index 4e37a24618..d4b0424e9c 100644 --- a/_examples/fasthttp/go.mod +++ b/_examples/fasthttp/go.mod @@ -13,7 +13,7 @@ require ( require ( github.com/andybalholm/brotli v1.0.4 // indirect - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/klauspost/compress v1.15.0 // indirect diff --git a/_examples/instrumentation/go.mod b/_examples/instrumentation/go.mod index b3b40843ca..cc4695fced 100644 --- a/_examples/instrumentation/go.mod +++ b/_examples/instrumentation/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20191002063538-b491ce54d752 github.com/fatih/color v1.7.0 go.elastic.co/apm v1.11.0 diff --git a/_examples/logging/go.mod b/_examples/logging/go.mod index 34600ff313..699aea2671 100644 --- a/_examples/logging/go.mod +++ b/_examples/logging/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-00010101000000-000000000000 github.com/rs/zerolog v1.32.0 ) diff --git a/_examples/security/go.mod b/_examples/security/go.mod index 45ea67de2f..5a15d4de37 100644 --- a/_examples/security/go.mod +++ b/_examples/security/go.mod @@ -9,7 +9,7 @@ replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require github.com/elastic/go-elasticsearch/v8 v8.0.0-00010101000000-000000000000 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/xkcdsearch/go.mod b/_examples/xkcdsearch/go.mod index 62c3141fd8..973a39fa62 100644 --- a/_examples/xkcdsearch/go.mod +++ b/_examples/xkcdsearch/go.mod @@ -14,7 +14,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect diff --git a/esapi/api._.go b/esapi/api._.go index 349030a82a..ab89dd4dbd 100755 --- a/esapi/api._.go +++ b/esapi/api._.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0 (30399bb): DO NOT EDIT +// Code generated from specification version 9.0.0 (0c7ccb6): DO NOT EDIT package esapi @@ -103,8 +103,10 @@ type API struct { EqlGet EqlGet EqlGetStatus EqlGetStatus EqlSearch EqlSearch + EsqlAsyncQueryDelete EsqlAsyncQueryDelete EsqlAsyncQueryGet EsqlAsyncQueryGet EsqlAsyncQuery EsqlAsyncQuery + EsqlAsyncQueryStop EsqlAsyncQueryStop EsqlQuery EsqlQuery Exists Exists ExistsSource ExistsSource @@ -126,11 +128,18 @@ type API struct { GraphExplore GraphExplore HealthReport HealthReport Index Index + InferenceChatCompletionUnified InferenceChatCompletionUnified + InferenceCompletion InferenceCompletion InferenceDelete InferenceDelete InferenceGet InferenceGet InferenceInference InferenceInference InferencePut InferencePut + InferenceRerank InferenceRerank + InferenceSparseEmbedding InferenceSparseEmbedding + InferenceStreamCompletion InferenceStreamCompletion InferenceStreamInference InferenceStreamInference + InferenceTextEmbedding InferenceTextEmbedding + InferenceUpdate InferenceUpdate Info Info KnnSearch KnnSearch LogstashDeletePipeline LogstashDeletePipeline @@ -274,65 +283,70 @@ type Cluster struct { // Indices contains the Indices APIs type Indices struct { - AddBlock IndicesAddBlock - Analyze IndicesAnalyze - ClearCache IndicesClearCache - Clone IndicesClone - Close IndicesClose - CreateDataStream IndicesCreateDataStream - Create IndicesCreate - DataStreamsStats IndicesDataStreamsStats - DeleteAlias IndicesDeleteAlias - DeleteDataLifecycle IndicesDeleteDataLifecycle - DeleteDataStream IndicesDeleteDataStream - DeleteIndexTemplate IndicesDeleteIndexTemplate - Delete IndicesDelete - DeleteTemplate IndicesDeleteTemplate - DiskUsage IndicesDiskUsage - Downsample IndicesDownsample - ExistsAlias IndicesExistsAlias - ExistsIndexTemplate IndicesExistsIndexTemplate - Exists IndicesExists - ExistsTemplate IndicesExistsTemplate - ExplainDataLifecycle IndicesExplainDataLifecycle - 
FieldUsageStats IndicesFieldUsageStats - Flush IndicesFlush - Forcemerge IndicesForcemerge - GetAlias IndicesGetAlias - GetDataLifecycle IndicesGetDataLifecycle - GetDataStream IndicesGetDataStream - GetFieldMapping IndicesGetFieldMapping - GetIndexTemplate IndicesGetIndexTemplate - GetMapping IndicesGetMapping - Get IndicesGet - GetSettings IndicesGetSettings - GetTemplate IndicesGetTemplate - MigrateToDataStream IndicesMigrateToDataStream - ModifyDataStream IndicesModifyDataStream - Open IndicesOpen - PromoteDataStream IndicesPromoteDataStream - PutAlias IndicesPutAlias - PutDataLifecycle IndicesPutDataLifecycle - PutIndexTemplate IndicesPutIndexTemplate - PutMapping IndicesPutMapping - PutSettings IndicesPutSettings - PutTemplate IndicesPutTemplate - Recovery IndicesRecovery - Refresh IndicesRefresh - ReloadSearchAnalyzers IndicesReloadSearchAnalyzers - ResolveCluster IndicesResolveCluster - ResolveIndex IndicesResolveIndex - Rollover IndicesRollover - Segments IndicesSegments - ShardStores IndicesShardStores - Shrink IndicesShrink - SimulateIndexTemplate IndicesSimulateIndexTemplate - SimulateTemplate IndicesSimulateTemplate - Split IndicesSplit - Stats IndicesStats - Unfreeze IndicesUnfreeze - UpdateAliases IndicesUpdateAliases - ValidateQuery IndicesValidateQuery + AddBlock IndicesAddBlock + Analyze IndicesAnalyze + CancelMigrateReindex IndicesCancelMigrateReindex + ClearCache IndicesClearCache + Clone IndicesClone + Close IndicesClose + CreateDataStream IndicesCreateDataStream + CreateFrom IndicesCreateFrom + Create IndicesCreate + DataStreamsStats IndicesDataStreamsStats + DeleteAlias IndicesDeleteAlias + DeleteDataLifecycle IndicesDeleteDataLifecycle + DeleteDataStream IndicesDeleteDataStream + DeleteIndexTemplate IndicesDeleteIndexTemplate + Delete IndicesDelete + DeleteTemplate IndicesDeleteTemplate + DiskUsage IndicesDiskUsage + Downsample IndicesDownsample + ExistsAlias IndicesExistsAlias + ExistsIndexTemplate IndicesExistsIndexTemplate + Exists 
IndicesExists + ExistsTemplate IndicesExistsTemplate + ExplainDataLifecycle IndicesExplainDataLifecycle + FieldUsageStats IndicesFieldUsageStats + Flush IndicesFlush + Forcemerge IndicesForcemerge + GetAlias IndicesGetAlias + GetDataLifecycle IndicesGetDataLifecycle + GetDataLifecycleStats IndicesGetDataLifecycleStats + GetDataStream IndicesGetDataStream + GetFieldMapping IndicesGetFieldMapping + GetIndexTemplate IndicesGetIndexTemplate + GetMapping IndicesGetMapping + GetMigrateReindexStatus IndicesGetMigrateReindexStatus + Get IndicesGet + GetSettings IndicesGetSettings + GetTemplate IndicesGetTemplate + MigrateReindex IndicesMigrateReindex + MigrateToDataStream IndicesMigrateToDataStream + ModifyDataStream IndicesModifyDataStream + Open IndicesOpen + PromoteDataStream IndicesPromoteDataStream + PutAlias IndicesPutAlias + PutDataLifecycle IndicesPutDataLifecycle + PutIndexTemplate IndicesPutIndexTemplate + PutMapping IndicesPutMapping + PutSettings IndicesPutSettings + PutTemplate IndicesPutTemplate + Recovery IndicesRecovery + Refresh IndicesRefresh + ReloadSearchAnalyzers IndicesReloadSearchAnalyzers + ResolveCluster IndicesResolveCluster + ResolveIndex IndicesResolveIndex + Rollover IndicesRollover + Segments IndicesSegments + ShardStores IndicesShardStores + Shrink IndicesShrink + SimulateIndexTemplate IndicesSimulateIndexTemplate + SimulateTemplate IndicesSimulateTemplate + Split IndicesSplit + Stats IndicesStats + Unfreeze IndicesUnfreeze + UpdateAliases IndicesUpdateAliases + ValidateQuery IndicesValidateQuery } // Ingest contains the Ingest APIs @@ -558,6 +572,7 @@ type Security struct { CreateAPIKey SecurityCreateAPIKey CreateCrossClusterAPIKey SecurityCreateCrossClusterAPIKey CreateServiceToken SecurityCreateServiceToken + DelegatePki SecurityDelegatePki DeletePrivileges SecurityDeletePrivileges DeleteRoleMapping SecurityDeleteRoleMapping DeleteRole SecurityDeleteRole @@ -710,8 +725,10 @@ func New(t Transport) *API { EqlGet: newEqlGetFunc(t), 
EqlGetStatus: newEqlGetStatusFunc(t), EqlSearch: newEqlSearchFunc(t), + EsqlAsyncQueryDelete: newEsqlAsyncQueryDeleteFunc(t), EsqlAsyncQueryGet: newEsqlAsyncQueryGetFunc(t), EsqlAsyncQuery: newEsqlAsyncQueryFunc(t), + EsqlAsyncQueryStop: newEsqlAsyncQueryStopFunc(t), EsqlQuery: newEsqlQueryFunc(t), Exists: newExistsFunc(t), ExistsSource: newExistsSourceFunc(t), @@ -733,11 +750,18 @@ func New(t Transport) *API { GraphExplore: newGraphExploreFunc(t), HealthReport: newHealthReportFunc(t), Index: newIndexFunc(t), + InferenceChatCompletionUnified: newInferenceChatCompletionUnifiedFunc(t), + InferenceCompletion: newInferenceCompletionFunc(t), InferenceDelete: newInferenceDeleteFunc(t), InferenceGet: newInferenceGetFunc(t), InferenceInference: newInferenceInferenceFunc(t), InferencePut: newInferencePutFunc(t), + InferenceRerank: newInferenceRerankFunc(t), + InferenceSparseEmbedding: newInferenceSparseEmbeddingFunc(t), + InferenceStreamCompletion: newInferenceStreamCompletionFunc(t), InferenceStreamInference: newInferenceStreamInferenceFunc(t), + InferenceTextEmbedding: newInferenceTextEmbeddingFunc(t), + InferenceUpdate: newInferenceUpdateFunc(t), Info: newInfoFunc(t), KnnSearch: newKnnSearchFunc(t), LogstashDeletePipeline: newLogstashDeletePipelineFunc(t), @@ -874,65 +898,70 @@ func New(t Transport) *API { Stats: newClusterStatsFunc(t), }, Indices: &Indices{ - AddBlock: newIndicesAddBlockFunc(t), - Analyze: newIndicesAnalyzeFunc(t), - ClearCache: newIndicesClearCacheFunc(t), - Clone: newIndicesCloneFunc(t), - Close: newIndicesCloseFunc(t), - CreateDataStream: newIndicesCreateDataStreamFunc(t), - Create: newIndicesCreateFunc(t), - DataStreamsStats: newIndicesDataStreamsStatsFunc(t), - DeleteAlias: newIndicesDeleteAliasFunc(t), - DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), - DeleteDataStream: newIndicesDeleteDataStreamFunc(t), - DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), - Delete: newIndicesDeleteFunc(t), - DeleteTemplate: 
newIndicesDeleteTemplateFunc(t), - DiskUsage: newIndicesDiskUsageFunc(t), - Downsample: newIndicesDownsampleFunc(t), - ExistsAlias: newIndicesExistsAliasFunc(t), - ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), - Exists: newIndicesExistsFunc(t), - ExistsTemplate: newIndicesExistsTemplateFunc(t), - ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), - FieldUsageStats: newIndicesFieldUsageStatsFunc(t), - Flush: newIndicesFlushFunc(t), - Forcemerge: newIndicesForcemergeFunc(t), - GetAlias: newIndicesGetAliasFunc(t), - GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), - GetDataStream: newIndicesGetDataStreamFunc(t), - GetFieldMapping: newIndicesGetFieldMappingFunc(t), - GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), - GetMapping: newIndicesGetMappingFunc(t), - Get: newIndicesGetFunc(t), - GetSettings: newIndicesGetSettingsFunc(t), - GetTemplate: newIndicesGetTemplateFunc(t), - MigrateToDataStream: newIndicesMigrateToDataStreamFunc(t), - ModifyDataStream: newIndicesModifyDataStreamFunc(t), - Open: newIndicesOpenFunc(t), - PromoteDataStream: newIndicesPromoteDataStreamFunc(t), - PutAlias: newIndicesPutAliasFunc(t), - PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), - PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), - PutMapping: newIndicesPutMappingFunc(t), - PutSettings: newIndicesPutSettingsFunc(t), - PutTemplate: newIndicesPutTemplateFunc(t), - Recovery: newIndicesRecoveryFunc(t), - Refresh: newIndicesRefreshFunc(t), - ReloadSearchAnalyzers: newIndicesReloadSearchAnalyzersFunc(t), - ResolveCluster: newIndicesResolveClusterFunc(t), - ResolveIndex: newIndicesResolveIndexFunc(t), - Rollover: newIndicesRolloverFunc(t), - Segments: newIndicesSegmentsFunc(t), - ShardStores: newIndicesShardStoresFunc(t), - Shrink: newIndicesShrinkFunc(t), - SimulateIndexTemplate: newIndicesSimulateIndexTemplateFunc(t), - SimulateTemplate: newIndicesSimulateTemplateFunc(t), - Split: newIndicesSplitFunc(t), - Stats: newIndicesStatsFunc(t), - Unfreeze: 
newIndicesUnfreezeFunc(t), - UpdateAliases: newIndicesUpdateAliasesFunc(t), - ValidateQuery: newIndicesValidateQueryFunc(t), + AddBlock: newIndicesAddBlockFunc(t), + Analyze: newIndicesAnalyzeFunc(t), + CancelMigrateReindex: newIndicesCancelMigrateReindexFunc(t), + ClearCache: newIndicesClearCacheFunc(t), + Clone: newIndicesCloneFunc(t), + Close: newIndicesCloseFunc(t), + CreateDataStream: newIndicesCreateDataStreamFunc(t), + CreateFrom: newIndicesCreateFromFunc(t), + Create: newIndicesCreateFunc(t), + DataStreamsStats: newIndicesDataStreamsStatsFunc(t), + DeleteAlias: newIndicesDeleteAliasFunc(t), + DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), + DeleteDataStream: newIndicesDeleteDataStreamFunc(t), + DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), + Delete: newIndicesDeleteFunc(t), + DeleteTemplate: newIndicesDeleteTemplateFunc(t), + DiskUsage: newIndicesDiskUsageFunc(t), + Downsample: newIndicesDownsampleFunc(t), + ExistsAlias: newIndicesExistsAliasFunc(t), + ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), + Exists: newIndicesExistsFunc(t), + ExistsTemplate: newIndicesExistsTemplateFunc(t), + ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), + FieldUsageStats: newIndicesFieldUsageStatsFunc(t), + Flush: newIndicesFlushFunc(t), + Forcemerge: newIndicesForcemergeFunc(t), + GetAlias: newIndicesGetAliasFunc(t), + GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), + GetDataLifecycleStats: newIndicesGetDataLifecycleStatsFunc(t), + GetDataStream: newIndicesGetDataStreamFunc(t), + GetFieldMapping: newIndicesGetFieldMappingFunc(t), + GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), + GetMapping: newIndicesGetMappingFunc(t), + GetMigrateReindexStatus: newIndicesGetMigrateReindexStatusFunc(t), + Get: newIndicesGetFunc(t), + GetSettings: newIndicesGetSettingsFunc(t), + GetTemplate: newIndicesGetTemplateFunc(t), + MigrateReindex: newIndicesMigrateReindexFunc(t), + MigrateToDataStream: newIndicesMigrateToDataStreamFunc(t), + 
ModifyDataStream: newIndicesModifyDataStreamFunc(t), + Open: newIndicesOpenFunc(t), + PromoteDataStream: newIndicesPromoteDataStreamFunc(t), + PutAlias: newIndicesPutAliasFunc(t), + PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), + PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), + PutMapping: newIndicesPutMappingFunc(t), + PutSettings: newIndicesPutSettingsFunc(t), + PutTemplate: newIndicesPutTemplateFunc(t), + Recovery: newIndicesRecoveryFunc(t), + Refresh: newIndicesRefreshFunc(t), + ReloadSearchAnalyzers: newIndicesReloadSearchAnalyzersFunc(t), + ResolveCluster: newIndicesResolveClusterFunc(t), + ResolveIndex: newIndicesResolveIndexFunc(t), + Rollover: newIndicesRolloverFunc(t), + Segments: newIndicesSegmentsFunc(t), + ShardStores: newIndicesShardStoresFunc(t), + Shrink: newIndicesShrinkFunc(t), + SimulateIndexTemplate: newIndicesSimulateIndexTemplateFunc(t), + SimulateTemplate: newIndicesSimulateTemplateFunc(t), + Split: newIndicesSplitFunc(t), + Stats: newIndicesStatsFunc(t), + Unfreeze: newIndicesUnfreezeFunc(t), + UpdateAliases: newIndicesUpdateAliasesFunc(t), + ValidateQuery: newIndicesValidateQueryFunc(t), }, Ingest: &Ingest{ DeleteGeoipDatabase: newIngestDeleteGeoipDatabaseFunc(t), @@ -1129,6 +1158,7 @@ func New(t Transport) *API { CreateAPIKey: newSecurityCreateAPIKeyFunc(t), CreateCrossClusterAPIKey: newSecurityCreateCrossClusterAPIKeyFunc(t), CreateServiceToken: newSecurityCreateServiceTokenFunc(t), + DelegatePki: newSecurityDelegatePkiFunc(t), DeletePrivileges: newSecurityDeletePrivilegesFunc(t), DeleteRoleMapping: newSecurityDeleteRoleMappingFunc(t), DeleteRole: newSecurityDeleteRoleFunc(t), diff --git a/esapi/api.bulk.go b/esapi/api.bulk.go index 8389c75ce4..34b40ab61a 100644 --- a/esapi/api.bulk.go +++ b/esapi/api.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -56,6 +56,7 @@ type BulkRequest struct { Body io.Reader + IncludeSourceOnError *bool ListExecutedPipelines *bool Pipeline string Refresh string @@ -66,7 +67,6 @@ type BulkRequest struct { SourceExcludes []string SourceIncludes []string Timeout time.Duration - DocumentType string WaitForActiveShards string Pretty bool @@ -114,6 +114,10 @@ func (r BulkRequest) Do(providedCtx context.Context, transport Transport) (*Resp params = make(map[string]string) + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.ListExecutedPipelines != nil { params["list_executed_pipelines"] = strconv.FormatBool(*r.ListExecutedPipelines) } @@ -154,10 +158,6 @@ func (r BulkRequest) Do(providedCtx context.Context, transport Transport) (*Resp params["timeout"] = formatDuration(r.Timeout) } - if r.DocumentType != "" { - params["type"] = r.DocumentType - } - if r.WaitForActiveShards != "" { params["wait_for_active_shards"] = r.WaitForActiveShards } @@ -254,6 +254,13 @@ func (f Bulk) WithIndex(v string) func(*BulkRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Bulk) WithIncludeSourceOnError(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.IncludeSourceOnError = &v + } +} + // WithListExecutedPipelines - sets list_executed_pipelines for all incoming documents. defaults to unset (false). func (f Bulk) WithListExecutedPipelines(v bool) func(*BulkRequest) { return func(r *BulkRequest) { @@ -275,14 +282,14 @@ func (f Bulk) WithRefresh(v string) func(*BulkRequest) { } } -// WithRequireAlias - sets require_alias for all incoming documents. defaults to unset (false). 
+// WithRequireAlias - if true, the request’s actions must target an index alias. defaults to false.. func (f Bulk) WithRequireAlias(v bool) func(*BulkRequest) { return func(r *BulkRequest) { r.RequireAlias = &v } } -// WithRequireDataStream - when true, requires the destination to be a data stream (existing or to-be-created). default is false. +// WithRequireDataStream - if true, the request's actions must target a data stream (existing or to-be-created). default to false. func (f Bulk) WithRequireDataStream(v bool) func(*BulkRequest) { return func(r *BulkRequest) { r.RequireDataStream = &v @@ -324,13 +331,6 @@ func (f Bulk) WithTimeout(v time.Duration) func(*BulkRequest) { } } -// WithDocumentType - default document type for items which don't provide one. -func (f Bulk) WithDocumentType(v string) func(*BulkRequest) { - return func(r *BulkRequest) { - r.DocumentType = v - } -} - // WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the bulk operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). func (f Bulk) WithWaitForActiveShards(v string) func(*BulkRequest) { return func(r *BulkRequest) { diff --git a/esapi/api.capabilities.go b/esapi/api.capabilities.go index 40957be3d1..f8c5c959f7 100644 --- a/esapi/api.capabilities.go +++ b/esapi/api.capabilities.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.aliases.go b/esapi/api.cat.aliases.go index 109a6c7825..ce8d84ab9a 100644 --- a/esapi/api.cat.aliases.go +++ b/esapi/api.cat.aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newCatAliasesFunc(t Transport) CatAliases { @@ -56,7 +57,7 @@ type CatAliasesRequest struct { Format string H []string Help *bool - Local *bool + MasterTimeout time.Duration S []string V *bool @@ -123,8 +124,8 @@ func (r CatAliasesRequest) Do(providedCtx context.Context, transport Transport) params["help"] = strconv.FormatBool(*r.Help) } - if r.Local != nil { - params["local"] = strconv.FormatBool(*r.Local) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) } if len(r.S) > 0 { @@ -248,10 +249,10 @@ func (f CatAliases) WithHelp(v bool) func(*CatAliasesRequest) { } } -// WithLocal - return local information, do not retrieve the state from master node (default: false). -func (f CatAliases) WithLocal(v bool) func(*CatAliasesRequest) { +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f CatAliases) WithMasterTimeout(v time.Duration) func(*CatAliasesRequest) { return func(r *CatAliasesRequest) { - r.Local = &v + r.MasterTimeout = v } } diff --git a/esapi/api.cat.allocation.go b/esapi/api.cat.allocation.go index ceb6fa6287..434ecf6388 100644 --- a/esapi/api.cat.allocation.go +++ b/esapi/api.cat.allocation.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.component_templates.go b/esapi/api.cat.component_templates.go index 1a7532f282..76248c15f4 100644 --- a/esapi/api.cat.component_templates.go +++ b/esapi/api.cat.component_templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.count.go b/esapi/api.cat.count.go index 227420c3f9..9749b00735 100644 --- a/esapi/api.cat.count.go +++ b/esapi/api.cat.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.fielddata.go b/esapi/api.cat.fielddata.go index c62adb1c1f..d4331eb9a2 100644 --- a/esapi/api.cat.fielddata.go +++ b/esapi/api.cat.fielddata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.health.go b/esapi/api.cat.health.go index c5e1c5f1bf..a2035ab36c 100644 --- a/esapi/api.cat.health.go +++ b/esapi/api.cat.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.help.go b/esapi/api.cat.help.go index a03a4f52cc..955f6ff04b 100644 --- a/esapi/api.cat.help.go +++ b/esapi/api.cat.help.go @@ -15,14 +15,13 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi import ( "context" "net/http" - "strconv" "strings" ) @@ -50,9 +49,6 @@ type CatHelp func(o ...func(*CatHelpRequest)) (*Response, error) // CatHelpRequest configures the Cat Help API request. type CatHelpRequest struct { - Help *bool - S []string - Pretty bool Human bool ErrorTrace bool @@ -90,14 +86,6 @@ func (r CatHelpRequest) Do(providedCtx context.Context, transport Transport) (*R params = make(map[string]string) - if r.Help != nil { - params["help"] = strconv.FormatBool(*r.Help) - } - - if len(r.S) > 0 { - params["s"] = strings.Join(r.S, ",") - } - if r.Pretty { params["pretty"] = "true" } @@ -176,20 +164,6 @@ func (f CatHelp) WithContext(v context.Context) func(*CatHelpRequest) { } } -// WithHelp - return help information. -func (f CatHelp) WithHelp(v bool) func(*CatHelpRequest) { - return func(r *CatHelpRequest) { - r.Help = &v - } -} - -// WithS - comma-separated list of column names or column aliases to sort by. -func (f CatHelp) WithS(v ...string) func(*CatHelpRequest) { - return func(r *CatHelpRequest) { - r.S = v - } -} - // WithPretty makes the response body pretty-printed. 
func (f CatHelp) WithPretty() func(*CatHelpRequest) { return func(r *CatHelpRequest) { diff --git a/esapi/api.cat.indices.go b/esapi/api.cat.indices.go index f7081d8c5f..ef2048195f 100644 --- a/esapi/api.cat.indices.go +++ b/esapi/api.cat.indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.master.go b/esapi/api.cat.master.go index a13b9ab237..3f42cabfa2 100644 --- a/esapi/api.cat.master.go +++ b/esapi/api.cat.master.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.nodeattrs.go b/esapi/api.cat.nodeattrs.go index f017502db2..b0d31be5d9 100644 --- a/esapi/api.cat.nodeattrs.go +++ b/esapi/api.cat.nodeattrs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.nodes.go b/esapi/api.cat.nodes.go index 2b7b0be9a5..fbfc321bae 100644 --- a/esapi/api.cat.nodes.go +++ b/esapi/api.cat.nodes.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.pending_tasks.go b/esapi/api.cat.pending_tasks.go index de66e85cf4..84e935b49a 100644 --- a/esapi/api.cat.pending_tasks.go +++ b/esapi/api.cat.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.plugins.go b/esapi/api.cat.plugins.go index 14ac83ca9c..c0f1e63a6e 100644 --- a/esapi/api.cat.plugins.go +++ b/esapi/api.cat.plugins.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.recovery.go b/esapi/api.cat.recovery.go index 1ebc1ffd5f..105265f2a3 100644 --- a/esapi/api.cat.recovery.go +++ b/esapi/api.cat.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.repositories.go b/esapi/api.cat.repositories.go index 7cdeabade6..36317782d5 100644 --- a/esapi/api.cat.repositories.go +++ b/esapi/api.cat.repositories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.segments.go b/esapi/api.cat.segments.go index 4139ff08ec..8e9d3bcb91 100644 --- a/esapi/api.cat.segments.go +++ b/esapi/api.cat.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newCatSegmentsFunc(t Transport) CatSegments { @@ -52,12 +53,14 @@ type CatSegments func(o ...func(*CatSegmentsRequest)) (*Response, error) type CatSegmentsRequest struct { Index []string - Bytes string - Format string - H []string - Help *bool - S []string - V *bool + Bytes string + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool Pretty bool Human bool @@ -122,6 +125,14 @@ func (r CatSegmentsRequest) Do(providedCtx context.Context, transport Transport) params["help"] = strconv.FormatBool(*r.Help) } + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + if len(r.S) > 0 { params["s"] = strings.Join(r.S, ",") } @@ -243,6 +254,20 @@ func (f CatSegments) WithHelp(v bool) func(*CatSegmentsRequest) { } } +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatSegments) WithLocal(v bool) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f CatSegments) WithMasterTimeout(v time.Duration) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.MasterTimeout = v + } +} + // WithS - comma-separated list of column names or column aliases to sort by. func (f CatSegments) WithS(v ...string) func(*CatSegmentsRequest) { return func(r *CatSegmentsRequest) { diff --git a/esapi/api.cat.shards.go b/esapi/api.cat.shards.go index 403f4b4839..7f99f195a5 100644 --- a/esapi/api.cat.shards.go +++ b/esapi/api.cat.shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.snapshots.go b/esapi/api.cat.snapshots.go index 74d8ca72f4..a724fc49cc 100644 --- a/esapi/api.cat.snapshots.go +++ b/esapi/api.cat.snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.tasks.go b/esapi/api.cat.tasks.go index de5171ff85..53e7e8ee9f 100644 --- a/esapi/api.cat.tasks.go +++ b/esapi/api.cat.tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newCatTasksFunc(t Transport) CatTasks { @@ -52,16 +53,18 @@ type CatTasks func(o ...func(*CatTasksRequest)) (*Response, error) // CatTasksRequest configures the Cat Tasks API request. 
type CatTasksRequest struct { - Actions []string - Detailed *bool - Format string - H []string - Help *bool - Nodes []string - ParentTaskID string - S []string - Time string - V *bool + Actions []string + Detailed *bool + Format string + H []string + Help *bool + Nodes []string + ParentTaskID string + S []string + Time string + Timeout time.Duration + V *bool + WaitForCompletion *bool Pretty bool Human bool @@ -136,10 +139,18 @@ func (r CatTasksRequest) Do(providedCtx context.Context, transport Transport) (* params["time"] = r.Time } + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.V != nil { params["v"] = strconv.FormatBool(*r.V) } + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + if r.Pretty { params["pretty"] = "true" } @@ -281,6 +292,13 @@ func (f CatTasks) WithTime(v string) func(*CatTasksRequest) { } } +// WithTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f CatTasks) WithTimeout(v time.Duration) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Timeout = v + } +} + // WithV - verbose mode. display column headers. func (f CatTasks) WithV(v bool) func(*CatTasksRequest) { return func(r *CatTasksRequest) { @@ -288,6 +306,13 @@ func (f CatTasks) WithV(v bool) func(*CatTasksRequest) { } } +// WithWaitForCompletion - if `true`, the request blocks until the task has completed.. +func (f CatTasks) WithWaitForCompletion(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.WaitForCompletion = &v + } +} + // WithPretty makes the response body pretty-printed. 
func (f CatTasks) WithPretty() func(*CatTasksRequest) { return func(r *CatTasksRequest) { diff --git a/esapi/api.cat.templates.go b/esapi/api.cat.templates.go index 2cff2b8746..7a9d4aeed3 100644 --- a/esapi/api.cat.templates.go +++ b/esapi/api.cat.templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.thread_pool.go b/esapi/api.cat.thread_pool.go index af889c1869..1ab0e8ad81 100644 --- a/esapi/api.cat.thread_pool.go +++ b/esapi/api.cat.thread_pool.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.clear_scroll.go b/esapi/api.clear_scroll.go index 9aa6cc8195..b90338a3a4 100644 --- a/esapi/api.clear_scroll.go +++ b/esapi/api.clear_scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.allocation_explain.go b/esapi/api.cluster.allocation_explain.go index 385e0ad03d..bbf1c1f339 100644 --- a/esapi/api.cluster.allocation_explain.go +++ b/esapi/api.cluster.allocation_explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.delete_component_template.go b/esapi/api.cluster.delete_component_template.go index 704d056a41..540f0ff97c 100644 --- a/esapi/api.cluster.delete_component_template.go +++ b/esapi/api.cluster.delete_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.delete_voting_config_exclusions.go b/esapi/api.cluster.delete_voting_config_exclusions.go index 497fa212cd..73923fe2ab 100644 --- a/esapi/api.cluster.delete_voting_config_exclusions.go +++ b/esapi/api.cluster.delete_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.exists_component_template.go b/esapi/api.cluster.exists_component_template.go index 6802b1b1e7..82995a838a 100644 --- a/esapi/api.cluster.exists_component_template.go +++ b/esapi/api.cluster.exists_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -192,7 +192,7 @@ func (f ClusterExistsComponentTemplate) WithLocal(v bool) func(*ClusterExistsCom } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. 
func (f ClusterExistsComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterExistsComponentTemplateRequest) { return func(r *ClusterExistsComponentTemplateRequest) { r.MasterTimeout = v diff --git a/esapi/api.cluster.get_component_template.go b/esapi/api.cluster.get_component_template.go index 7445496f6c..a526c74339 100644 --- a/esapi/api.cluster.get_component_template.go +++ b/esapi/api.cluster.get_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -213,7 +213,7 @@ func (f ClusterGetComponentTemplate) WithLocal(v bool) func(*ClusterGetComponent } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. func (f ClusterGetComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterGetComponentTemplateRequest) { return func(r *ClusterGetComponentTemplateRequest) { r.MasterTimeout = v diff --git a/esapi/api.cluster.get_settings.go b/esapi/api.cluster.get_settings.go index 728bdacb3f..15030d99b9 100644 --- a/esapi/api.cluster.get_settings.go +++ b/esapi/api.cluster.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -201,7 +201,7 @@ func (f ClusterGetSettings) WithIncludeDefaults(v bool) func(*ClusterGetSettings } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. 
func (f ClusterGetSettings) WithMasterTimeout(v time.Duration) func(*ClusterGetSettingsRequest) { return func(r *ClusterGetSettingsRequest) { r.MasterTimeout = v diff --git a/esapi/api.cluster.health.go b/esapi/api.cluster.health.go index 2349ec537d..947e46f68a 100644 --- a/esapi/api.cluster.health.go +++ b/esapi/api.cluster.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.info.go b/esapi/api.cluster.info.go index 2b74fc438e..d9ff9090e9 100644 --- a/esapi/api.cluster.info.go +++ b/esapi/api.cluster.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.pending_tasks.go b/esapi/api.cluster.pending_tasks.go index 616059e56d..c0d47f7f59 100644 --- a/esapi/api.cluster.pending_tasks.go +++ b/esapi/api.cluster.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.post_voting_config_exclusions.go b/esapi/api.cluster.post_voting_config_exclusions.go index e018d275e6..6a767e1aed 100644 --- a/esapi/api.cluster.post_voting_config_exclusions.go +++ b/esapi/api.cluster.post_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.put_component_template.go b/esapi/api.cluster.put_component_template.go index 977724aca8..cd61002c8b 100644 --- a/esapi/api.cluster.put_component_template.go +++ b/esapi/api.cluster.put_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.put_settings.go b/esapi/api.cluster.put_settings.go index 0f9b9c74d9..d8e7291818 100644 --- a/esapi/api.cluster.put_settings.go +++ b/esapi/api.cluster.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.remote_info.go b/esapi/api.cluster.remote_info.go index 2f46cceafd..7f5150da19 100644 --- a/esapi/api.cluster.remote_info.go +++ b/esapi/api.cluster.remote_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.reroute.go b/esapi/api.cluster.reroute.go index d2487f1af6..cd9891b437 100644 --- a/esapi/api.cluster.reroute.go +++ b/esapi/api.cluster.reroute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.state.go b/esapi/api.cluster.state.go index dd219a7c91..d7f3817880 100644 --- a/esapi/api.cluster.state.go +++ b/esapi/api.cluster.state.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.stats.go b/esapi/api.cluster.stats.go index 28daf94943..c8668e8239 100644 --- a/esapi/api.cluster.stats.go +++ b/esapi/api.cluster.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.check_in.go b/esapi/api.connector.check_in.go index 32f435fb91..aada67c1df 100644 --- a/esapi/api.connector.check_in.go +++ b/esapi/api.connector.check_in.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.delete.go b/esapi/api.connector.delete.go index a4749cb87e..190cf952ad 100644 --- a/esapi/api.connector.delete.go +++ b/esapi/api.connector.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -55,6 +55,7 @@ type ConnectorDeleteRequest struct { ConnectorID string DeleteSyncJobs *bool + Hard *bool Pretty bool Human bool @@ -103,6 +104,10 @@ func (r ConnectorDeleteRequest) Do(providedCtx context.Context, transport Transp params["delete_sync_jobs"] = strconv.FormatBool(*r.DeleteSyncJobs) } + if r.Hard != nil { + params["hard"] = strconv.FormatBool(*r.Hard) + } + if r.Pretty { params["pretty"] = "true" } @@ -188,6 +193,13 @@ func (f ConnectorDelete) WithDeleteSyncJobs(v bool) func(*ConnectorDeleteRequest } } +// WithHard - if true, the connector doc is deleted. if false, connector doc is marked as deleted (soft-deleted).. +func (f ConnectorDelete) WithHard(v bool) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.Hard = &v + } +} + // WithPretty makes the response body pretty-printed. func (f ConnectorDelete) WithPretty() func(*ConnectorDeleteRequest) { return func(r *ConnectorDeleteRequest) { diff --git a/esapi/api.connector.get.go b/esapi/api.connector.get.go index 0a2a6081a9..6f8508db37 100644 --- a/esapi/api.connector.get.go +++ b/esapi/api.connector.get.go @@ -15,13 +15,14 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi import ( "context" "net/http" + "strconv" "strings" ) @@ -53,6 +54,8 @@ type ConnectorGet func(connector_id string, o ...func(*ConnectorGetRequest)) (*R type ConnectorGetRequest struct { ConnectorID string + IncludeDeleted *bool + Pretty bool Human bool ErrorTrace bool @@ -96,6 +99,10 @@ func (r ConnectorGetRequest) Do(providedCtx context.Context, transport Transport params = make(map[string]string) + if r.IncludeDeleted != nil { + params["include_deleted"] = strconv.FormatBool(*r.IncludeDeleted) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +181,13 @@ func (f ConnectorGet) WithContext(v context.Context) func(*ConnectorGetRequest) } } +// WithIncludeDeleted - a flag indicating whether to return connectors that have been soft-deleted.. +func (f ConnectorGet) WithIncludeDeleted(v bool) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.IncludeDeleted = &v + } +} + // WithPretty makes the response body pretty-printed. func (f ConnectorGet) WithPretty() func(*ConnectorGetRequest) { return func(r *ConnectorGetRequest) { diff --git a/esapi/api.connector.last_sync.go b/esapi/api.connector.last_sync.go index ce07ab1325..8ba6997026 100644 --- a/esapi/api.connector.last_sync.go +++ b/esapi/api.connector.last_sync.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.list.go b/esapi/api.connector.list.go index d9d61f7999..e93ed767c3 100644 --- a/esapi/api.connector.list.go +++ b/esapi/api.connector.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -52,12 +52,13 @@ type ConnectorList func(o ...func(*ConnectorListRequest)) (*Response, error) // ConnectorListRequest configures the Connector List API request. type ConnectorListRequest struct { - ConnectorName []string - From *int - IndexName []string - Query string - ServiceType []string - Size *int + ConnectorName []string + From *int + IncludeDeleted *bool + IndexName []string + Query string + ServiceType []string + Size *int Pretty bool Human bool @@ -104,6 +105,10 @@ func (r ConnectorListRequest) Do(providedCtx context.Context, transport Transpor params["from"] = strconv.FormatInt(int64(*r.From), 10) } + if r.IncludeDeleted != nil { + params["include_deleted"] = strconv.FormatBool(*r.IncludeDeleted) + } + if len(r.IndexName) > 0 { params["index_name"] = strings.Join(r.IndexName, ",") } @@ -212,6 +217,13 @@ func (f ConnectorList) WithFrom(v int) func(*ConnectorListRequest) { } } +// WithIncludeDeleted - a flag indicating whether to return connectors that have been soft-deleted.. +func (f ConnectorList) WithIncludeDeleted(v bool) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.IncludeDeleted = &v + } +} + // WithIndexName - a list of connector index names to fetch connector documents for. func (f ConnectorList) WithIndexName(v ...string) func(*ConnectorListRequest) { return func(r *ConnectorListRequest) { diff --git a/esapi/api.connector.post.go b/esapi/api.connector.post.go index 1810ba76d5..e8165d0b17 100644 --- a/esapi/api.connector.post.go +++ b/esapi/api.connector.post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.put.go b/esapi/api.connector.put.go index c710e831ae..5030fd9173 100644 --- a/esapi/api.connector.put.go +++ b/esapi/api.connector.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_delete.go b/esapi/api.connector.secret_delete.go index dd877e4f49..5ed3fea359 100644 --- a/esapi/api.connector.secret_delete.go +++ b/esapi/api.connector.secret_delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_get.go b/esapi/api.connector.secret_get.go index a860294d7a..f74bb12415 100644 --- a/esapi/api.connector.secret_get.go +++ b/esapi/api.connector.secret_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_post.go b/esapi/api.connector.secret_post.go index fe1dc7c16d..808eaf5e00 100644 --- a/esapi/api.connector.secret_post.go +++ b/esapi/api.connector.secret_post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_put.go b/esapi/api.connector.secret_put.go index 563432118a..6fd628c7bc 100644 --- a/esapi/api.connector.secret_put.go +++ b/esapi/api.connector.secret_put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_cancel.go b/esapi/api.connector.sync_job_cancel.go index 78a1ce91a0..f31274e7bf 100644 --- a/esapi/api.connector.sync_job_cancel.go +++ b/esapi/api.connector.sync_job_cancel.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_check_in.go b/esapi/api.connector.sync_job_check_in.go index e75b083acb..749fe0cf47 100644 --- a/esapi/api.connector.sync_job_check_in.go +++ b/esapi/api.connector.sync_job_check_in.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_claim.go b/esapi/api.connector.sync_job_claim.go index 10a0e26422..cd980c7c20 100644 --- a/esapi/api.connector.sync_job_claim.go +++ b/esapi/api.connector.sync_job_claim.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_delete.go b/esapi/api.connector.sync_job_delete.go index c22be45922..5b3cea9e13 100644 --- a/esapi/api.connector.sync_job_delete.go +++ b/esapi/api.connector.sync_job_delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_error.go b/esapi/api.connector.sync_job_error.go index 9e71bdc7b5..4a955fede9 100644 --- a/esapi/api.connector.sync_job_error.go +++ b/esapi/api.connector.sync_job_error.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_get.go b/esapi/api.connector.sync_job_get.go index 7db21eef5f..6ef0c98dd0 100644 --- a/esapi/api.connector.sync_job_get.go +++ b/esapi/api.connector.sync_job_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_list.go b/esapi/api.connector.sync_job_list.go index 508206e768..fa4ecff016 100644 --- a/esapi/api.connector.sync_job_list.go +++ b/esapi/api.connector.sync_job_list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_post.go b/esapi/api.connector.sync_job_post.go index 97bf8f023d..bf8d80109e 100644 --- a/esapi/api.connector.sync_job_post.go +++ b/esapi/api.connector.sync_job_post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_update_stats.go b/esapi/api.connector.sync_job_update_stats.go index a4e5791280..d2721aa8bf 100644 --- a/esapi/api.connector.sync_job_update_stats.go +++ b/esapi/api.connector.sync_job_update_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_active_filtering.go b/esapi/api.connector.update_active_filtering.go index af425f2c52..2126e009bd 100644 --- a/esapi/api.connector.update_active_filtering.go +++ b/esapi/api.connector.update_active_filtering.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_api_key_id.go b/esapi/api.connector.update_api_key_id.go index 4fe903e2a0..fd949d5eba 100644 --- a/esapi/api.connector.update_api_key_id.go +++ b/esapi/api.connector.update_api_key_id.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_configuration.go b/esapi/api.connector.update_configuration.go index 667e0683f4..5cb0a5a179 100644 --- a/esapi/api.connector.update_configuration.go +++ b/esapi/api.connector.update_configuration.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_error.go b/esapi/api.connector.update_error.go index 68ab7432bf..0ba0cf673d 100644 --- a/esapi/api.connector.update_error.go +++ b/esapi/api.connector.update_error.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_features.go b/esapi/api.connector.update_features.go index a645dc7741..c7a57eae91 100644 --- a/esapi/api.connector.update_features.go +++ b/esapi/api.connector.update_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_filtering.go b/esapi/api.connector.update_filtering.go index 3a96096b80..b84428920a 100644 --- a/esapi/api.connector.update_filtering.go +++ b/esapi/api.connector.update_filtering.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_filtering_validation.go b/esapi/api.connector.update_filtering_validation.go index 5bbb9d1126..9ce620fa1d 100644 --- a/esapi/api.connector.update_filtering_validation.go +++ b/esapi/api.connector.update_filtering_validation.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_index_name.go b/esapi/api.connector.update_index_name.go index 8cf47b74c5..2f0010818d 100644 --- a/esapi/api.connector.update_index_name.go +++ b/esapi/api.connector.update_index_name.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_name.go b/esapi/api.connector.update_name.go index c540c2b407..1b7eca48fa 100644 --- a/esapi/api.connector.update_name.go +++ b/esapi/api.connector.update_name.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_native.go b/esapi/api.connector.update_native.go index dfbd7aaeaf..db048af02e 100644 --- a/esapi/api.connector.update_native.go +++ b/esapi/api.connector.update_native.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_pipeline.go b/esapi/api.connector.update_pipeline.go index dbc5da91b4..a2736c0e00 100644 --- a/esapi/api.connector.update_pipeline.go +++ b/esapi/api.connector.update_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_scheduling.go b/esapi/api.connector.update_scheduling.go index 197ac84c83..fe6f55c122 100644 --- a/esapi/api.connector.update_scheduling.go +++ b/esapi/api.connector.update_scheduling.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_service_type.go b/esapi/api.connector.update_service_type.go index a3ea51318e..5c8dd72119 100644 --- a/esapi/api.connector.update_service_type.go +++ b/esapi/api.connector.update_service_type.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_status.go b/esapi/api.connector.update_status.go index a172bfdbba..1b1937fff6 100644 --- a/esapi/api.connector.update_status.go +++ b/esapi/api.connector.update_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.count.go b/esapi/api.count.go index cbbf22707d..0d5da3e734 100644 --- a/esapi/api.count.go +++ b/esapi/api.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.create.go b/esapi/api.create.go index 5205b3b04b..3ee75997a4 100644 --- a/esapi/api.create.go +++ b/esapi/api.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -59,13 +59,14 @@ type CreateRequest struct { Body io.Reader - Pipeline string - Refresh string - Routing string - Timeout time.Duration - Version *int - VersionType string - WaitForActiveShards string + IncludeSourceOnError *bool + Pipeline string + Refresh string + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string Pretty bool Human bool @@ -115,6 +116,10 @@ func (r CreateRequest) Do(providedCtx context.Context, transport Transport) (*Re params = make(map[string]string) + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.Pipeline != "" { params["pipeline"] = r.Pipeline } @@ -228,6 +233,13 @@ func (f Create) WithContext(v context.Context) func(*CreateRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. 
+func (f Create) WithIncludeSourceOnError(v bool) func(*CreateRequest) { + return func(r *CreateRequest) { + r.IncludeSourceOnError = &v + } +} + // WithPipeline - the pipeline ID to preprocess incoming documents with. func (f Create) WithPipeline(v string) func(*CreateRequest) { return func(r *CreateRequest) { diff --git a/esapi/api.dangling_indices.delete_dangling_index.go b/esapi/api.dangling_indices.delete_dangling_index.go index 2fb1275f44..84311eb6e6 100644 --- a/esapi/api.dangling_indices.delete_dangling_index.go +++ b/esapi/api.dangling_indices.delete_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.dangling_indices.import_dangling_index.go b/esapi/api.dangling_indices.import_dangling_index.go index 8b1bae064f..10d825a7ab 100644 --- a/esapi/api.dangling_indices.import_dangling_index.go +++ b/esapi/api.dangling_indices.import_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.dangling_indices.list_dangling_indices.go b/esapi/api.dangling_indices.list_dangling_indices.go index 2c7daf7395..e19fbd6c01 100644 --- a/esapi/api.dangling_indices.list_dangling_indices.go +++ b/esapi/api.dangling_indices.list_dangling_indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete.go b/esapi/api.delete.go index b6d3de072c..4d7cd1ce24 100644 --- a/esapi/api.delete.go +++ b/esapi/api.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_by_query.go b/esapi/api.delete_by_query.go index da43f04daa..b919b2301c 100644 --- a/esapi/api.delete_by_query.go +++ b/esapi/api.delete_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_by_query_rethrottle.go b/esapi/api.delete_by_query_rethrottle.go index d1441e6683..03402fefa9 100644 --- a/esapi/api.delete_by_query_rethrottle.go +++ b/esapi/api.delete_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_script.go b/esapi/api.delete_script.go index 7f22136968..819f3479b8 100644 --- a/esapi/api.delete_script.go +++ b/esapi/api.delete_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.exists.go b/esapi/api.exists.go index 6ce32ba5a7..95e4daa3b7 100644 --- a/esapi/api.exists.go +++ b/esapi/api.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.exists_source.go b/esapi/api.exists_source.go index 3199ebb08f..9a5d6ef46f 100644 --- a/esapi/api.exists_source.go +++ b/esapi/api.exists_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.explain.go b/esapi/api.explain.go index a63bffc1e1..dda44c6a38 100644 --- a/esapi/api.explain.go +++ b/esapi/api.explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.features.get_features.go b/esapi/api.features.get_features.go index 055f83c274..0712c79880 100644 --- a/esapi/api.features.get_features.go +++ b/esapi/api.features.get_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.features.reset_features.go b/esapi/api.features.reset_features.go index fcce6e3695..fa77bfedca 100644 --- a/esapi/api.features.reset_features.go +++ b/esapi/api.features.reset_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.field_caps.go b/esapi/api.field_caps.go index 1de29546ac..6abb24af69 100644 --- a/esapi/api.field_caps.go +++ b/esapi/api.field_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.delete_secret.go b/esapi/api.fleet.delete_secret.go index 496249a517..15a11da013 100644 --- a/esapi/api.fleet.delete_secret.go +++ b/esapi/api.fleet.delete_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.get_secret.go b/esapi/api.fleet.get_secret.go index 0ed9da0fb2..500c8cbf22 100644 --- a/esapi/api.fleet.get_secret.go +++ b/esapi/api.fleet.get_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.global_checkpoints.go b/esapi/api.fleet.global_checkpoints.go index cf9ab110ea..d67363d433 100644 --- a/esapi/api.fleet.global_checkpoints.go +++ b/esapi/api.fleet.global_checkpoints.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.msearch.go b/esapi/api.fleet.msearch.go index a848d1ef92..066a1bbcf3 100644 --- a/esapi/api.fleet.msearch.go +++ b/esapi/api.fleet.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.post_secret.go b/esapi/api.fleet.post_secret.go index 1aefeaef20..73c74a2126 100644 --- a/esapi/api.fleet.post_secret.go +++ b/esapi/api.fleet.post_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.search.go b/esapi/api.fleet.search.go index b9764f73ed..80eab59ac0 100644 --- a/esapi/api.fleet.search.go +++ b/esapi/api.fleet.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.get.go b/esapi/api.get.go index 561ab84669..48918ca796 100644 --- a/esapi/api.get.go +++ b/esapi/api.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script.go b/esapi/api.get_script.go index abbb0ea08c..dfa5ad3889 100644 --- a/esapi/api.get_script.go +++ b/esapi/api.get_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script_context.go b/esapi/api.get_script_context.go index 58add2132d..1fd132a7ba 100644 --- a/esapi/api.get_script_context.go +++ b/esapi/api.get_script_context.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script_languages.go b/esapi/api.get_script_languages.go index 6111542e00..0bfbf89d3b 100644 --- a/esapi/api.get_script_languages.go +++ b/esapi/api.get_script_languages.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_source.go b/esapi/api.get_source.go index 36fa4fd7a3..e7164da5be 100644 --- a/esapi/api.get_source.go +++ b/esapi/api.get_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.health_report.go b/esapi/api.health_report.go index 276602a3d4..474ae0cc9d 100644 --- a/esapi/api.health_report.go +++ b/esapi/api.health_report.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.index.go b/esapi/api.index.go index 50f7b60517..cab764bffa 100644 --- a/esapi/api.index.go +++ b/esapi/api.index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -57,18 +57,19 @@ type IndexRequest struct { Body io.Reader - IfPrimaryTerm *int - IfSeqNo *int - OpType string - Pipeline string - Refresh string - RequireAlias *bool - RequireDataStream *bool - Routing string - Timeout time.Duration - Version *int - VersionType string - WaitForActiveShards string + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + OpType string + Pipeline string + Refresh string + RequireAlias *bool + RequireDataStream *bool + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string Pretty bool Human bool @@ -132,6 +133,10 @@ func (r IndexRequest) Do(providedCtx context.Context, transport Transport) (*Res params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) } + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.OpType != "" { params["op_type"] = r.OpType } @@ -278,6 +283,13 @@ func (f Index) WithIfSeqNo(v int) func(*IndexRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Index) WithIncludeSourceOnError(v bool) func(*IndexRequest) { + return func(r *IndexRequest) { + r.IncludeSourceOnError = &v + } +} + // WithOpType - explicit operation type. defaults to `index` for requests with an explicit document ID, and to `create`for requests without an explicit document ID. func (f Index) WithOpType(v string) func(*IndexRequest) { return func(r *IndexRequest) { diff --git a/esapi/api.indices.add_block.go b/esapi/api.indices.add_block.go index 4297642f49..be2aa7afe6 100644 --- a/esapi/api.indices.add_block.go +++ b/esapi/api.indices.add_block.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.analyze.go b/esapi/api.indices.analyze.go index accf120546..3a56e88d05 100644 --- a/esapi/api.indices.analyze.go +++ b/esapi/api.indices.analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.cancel_migrate_reindex.go b/esapi/api.indices.cancel_migrate_reindex.go new file mode 100644 index 0000000000..5a1906daf4 --- /dev/null +++ b/esapi/api.indices.cancel_migrate_reindex.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesCancelMigrateReindexFunc(t Transport) IndicesCancelMigrateReindex { + return func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error) { + var r = IndicesCancelMigrateReindexRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCancelMigrateReindex this API returns the status of a migration reindex attempt for a data stream or index +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-cancel-api.html. +type IndicesCancelMigrateReindex func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error) + +// IndicesCancelMigrateReindexRequest configures the Indices Cancel Migrate Reindex API request. +type IndicesCancelMigrateReindexRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesCancelMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_cancel")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_cancel") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + 
instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCancelMigrateReindex) WithContext(v context.Context) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCancelMigrateReindex) WithPretty() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCancelMigrateReindex) WithHuman() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCancelMigrateReindex) WithErrorTrace() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCancelMigrateReindex) WithFilterPath(v ...string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCancelMigrateReindex) WithHeader(h map[string]string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f IndicesCancelMigrateReindex) WithOpaqueID(s string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.clear_cache.go b/esapi/api.indices.clear_cache.go index 20c931cbfb..2080b89b13 100644 --- a/esapi/api.indices.clear_cache.go +++ b/esapi/api.indices.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.clone.go b/esapi/api.indices.clone.go index 2754d3f989..f56b0c537a 100644 --- a/esapi/api.indices.clone.go +++ b/esapi/api.indices.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.close.go b/esapi/api.indices.close.go index 3601cd94cd..fdf9c2b5e6 100644 --- a/esapi/api.indices.close.go +++ b/esapi/api.indices.close.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.create.go b/esapi/api.indices.create.go index 4356d58eb4..b0a786a50c 100644 --- a/esapi/api.indices.create.go +++ b/esapi/api.indices.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.create_from.go b/esapi/api.indices.create_from.go new file mode 100644 index 0000000000..79a2464774 --- /dev/null +++ b/esapi/api.indices.create_from.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesCreateFromFunc(t Transport) IndicesCreateFrom { + return func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) { + var r = IndicesCreateFromRequest{Dest: dest, Source: source} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCreateFrom this API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values. 
+// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index-from-source.html. +type IndicesCreateFrom func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) + +// IndicesCreateFromRequest configures the Indices Create From API request. +type IndicesCreateFromRequest struct { + Body io.Reader + + Dest string + Source string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesCreateFromRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_create_from") + 1 + len(r.Source) + 1 + len(r.Dest)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + path.WriteString(r.Source) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.Source) + } + path.WriteString("/") + path.WriteString(r.Dest) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "dest", r.Dest) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := 
r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCreateFrom) WithContext(v context.Context) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ctx = v + } +} + +// WithBody - The body contains the fields `mappings_override`, `settings_override`, and `remove_index_blocks`.. +func (f IndicesCreateFrom) WithBody(v io.Reader) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f IndicesCreateFrom) WithPretty() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCreateFrom) WithHuman() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCreateFrom) WithErrorTrace() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCreateFrom) WithFilterPath(v ...string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCreateFrom) WithHeader(h map[string]string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCreateFrom) WithOpaqueID(s string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.delete.go b/esapi/api.indices.delete.go index 449da765c5..ca929d9108 100644 --- a/esapi/api.indices.delete.go +++ b/esapi/api.indices.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_alias.go b/esapi/api.indices.delete_alias.go index c59086b934..2959ee8ebc 100644 --- a/esapi/api.indices.delete_alias.go +++ b/esapi/api.indices.delete_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_data_lifecycle.go b/esapi/api.indices.delete_data_lifecycle.go index 30abf73550..b6f3927008 100644 --- a/esapi/api.indices.delete_data_lifecycle.go +++ b/esapi/api.indices.delete_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_index_template.go b/esapi/api.indices.delete_index_template.go index 5babf1c954..ff1f512138 100644 --- a/esapi/api.indices.delete_index_template.go +++ b/esapi/api.indices.delete_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_template.go b/esapi/api.indices.delete_template.go index 6b7d5b4e70..cee53c5c0b 100644 --- a/esapi/api.indices.delete_template.go +++ b/esapi/api.indices.delete_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.disk_usage.go b/esapi/api.indices.disk_usage.go index e0a94adfa5..2d0b48042c 100644 --- a/esapi/api.indices.disk_usage.go +++ b/esapi/api.indices.disk_usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.downsample.go b/esapi/api.indices.downsample.go index 3db0e6de86..c1a36d2afa 100644 --- a/esapi/api.indices.downsample.go +++ b/esapi/api.indices.downsample.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists.go b/esapi/api.indices.exists.go index 3bd22065ca..c795c2dec2 100644 --- a/esapi/api.indices.exists.go +++ b/esapi/api.indices.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists_alias.go b/esapi/api.indices.exists_alias.go index 1dafdb9309..b7b298d847 100644 --- a/esapi/api.indices.exists_alias.go +++ b/esapi/api.indices.exists_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -25,6 +25,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newIndicesExistsAliasFunc(t Transport) IndicesExistsAlias { @@ -58,7 +59,7 @@ type IndicesExistsAliasRequest struct { AllowNoIndices *bool ExpandWildcards string IgnoreUnavailable *bool - Local *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -126,8 +127,8 @@ func (r IndicesExistsAliasRequest) Do(providedCtx context.Context, transport Tra params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } - if r.Local != nil { - params["local"] = strconv.FormatBool(*r.Local) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) } if r.Pretty { @@ -236,10 +237,10 @@ func (f IndicesExistsAlias) WithIgnoreUnavailable(v bool) func(*IndicesExistsAli } } -// WithLocal - return local information, do not retrieve the state from master node (default: false). -func (f IndicesExistsAlias) WithLocal(v bool) func(*IndicesExistsAliasRequest) { +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesExistsAlias) WithMasterTimeout(v time.Duration) func(*IndicesExistsAliasRequest) { return func(r *IndicesExistsAliasRequest) { - r.Local = &v + r.MasterTimeout = v } } diff --git a/esapi/api.indices.exists_index_template.go b/esapi/api.indices.exists_index_template.go index 211d8b4885..e67b2f00f8 100644 --- a/esapi/api.indices.exists_index_template.go +++ b/esapi/api.indices.exists_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists_template.go b/esapi/api.indices.exists_template.go index 3dcdf54977..6ae637a310 100644 --- a/esapi/api.indices.exists_template.go +++ b/esapi/api.indices.exists_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -209,7 +209,7 @@ func (f IndicesExistsTemplate) WithLocal(v bool) func(*IndicesExistsTemplateRequ } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. func (f IndicesExistsTemplate) WithMasterTimeout(v time.Duration) func(*IndicesExistsTemplateRequest) { return func(r *IndicesExistsTemplateRequest) { r.MasterTimeout = v diff --git a/esapi/api.indices.explain_data_lifecycle.go b/esapi/api.indices.explain_data_lifecycle.go index 8a4baba168..12c4298f23 100644 --- a/esapi/api.indices.explain_data_lifecycle.go +++ b/esapi/api.indices.explain_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.field_usage_stats.go b/esapi/api.indices.field_usage_stats.go index 7c7083bb82..b4a0decf42 100644 --- a/esapi/api.indices.field_usage_stats.go +++ b/esapi/api.indices.field_usage_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.flush.go b/esapi/api.indices.flush.go index 133a0dc0ba..b13123f7ed 100644 --- a/esapi/api.indices.flush.go +++ b/esapi/api.indices.flush.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.forcemerge.go b/esapi/api.indices.forcemerge.go index 667e749707..6ca2612814 100644 --- a/esapi/api.indices.forcemerge.go +++ b/esapi/api.indices.forcemerge.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get.go b/esapi/api.indices.get.go index 1ac65c0a7a..3e44654a46 100644 --- a/esapi/api.indices.get.go +++ b/esapi/api.indices.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_alias.go b/esapi/api.indices.get_alias.go index f69d37f75f..db16ecd3a2 100644 --- a/esapi/api.indices.get_alias.go +++ b/esapi/api.indices.get_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newIndicesGetAliasFunc(t Transport) IndicesGetAlias { @@ -57,7 +58,7 @@ type IndicesGetAliasRequest struct { AllowNoIndices *bool ExpandWildcards string IgnoreUnavailable *bool - Local *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -123,8 +124,8 @@ func (r IndicesGetAliasRequest) Do(providedCtx context.Context, transport Transp params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } - if r.Local != nil { - params["local"] = strconv.FormatBool(*r.Local) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) } if r.Pretty { @@ -240,10 +241,10 @@ func (f IndicesGetAlias) WithIgnoreUnavailable(v bool) func(*IndicesGetAliasRequ } } -// WithLocal - return local information, do not retrieve the state from master node (default: false). -func (f IndicesGetAlias) WithLocal(v bool) func(*IndicesGetAliasRequest) { +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesGetAlias) WithMasterTimeout(v time.Duration) func(*IndicesGetAliasRequest) { return func(r *IndicesGetAliasRequest) { - r.Local = &v + r.MasterTimeout = v } } diff --git a/esapi/api.indices.get_data_lifecycle.go b/esapi/api.indices.get_data_lifecycle.go index 0a26745d33..e11091fdf7 100644 --- a/esapi/api.indices.get_data_lifecycle.go +++ b/esapi/api.indices.get_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_data_lifecycle_stats.go b/esapi/api.indices.get_data_lifecycle_stats.go new file mode 100644 index 0000000000..478c5579c2 --- /dev/null +++ b/esapi/api.indices.get_data_lifecycle_stats.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetDataLifecycleStatsFunc(t Transport) IndicesGetDataLifecycleStats { + return func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) { + var r = IndicesGetDataLifecycleStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataLifecycleStats get data stream lifecycle statistics. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html. +type IndicesGetDataLifecycleStats func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) + +// IndicesGetDataLifecycleStatsRequest configures the Indices Get Data Lifecycle Stats API request. +type IndicesGetDataLifecycleStatsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetDataLifecycleStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_lifecycle/stats")) + path.WriteString("http://") + path.WriteString("/_lifecycle/stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = 
req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataLifecycleStats) WithContext(v context.Context) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataLifecycleStats) WithPretty() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataLifecycleStats) WithHuman() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataLifecycleStats) WithErrorTrace() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataLifecycleStats) WithFilterPath(v ...string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesGetDataLifecycleStats) WithHeader(h map[string]string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataLifecycleStats) WithOpaqueID(s string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.get_field_mapping.go b/esapi/api.indices.get_field_mapping.go index d67cb2af50..e7e4e1a31c 100644 --- a/esapi/api.indices.get_field_mapping.go +++ b/esapi/api.indices.get_field_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_index_template.go b/esapi/api.indices.get_index_template.go index 7568269bec..585b6d980c 100644 --- a/esapi/api.indices.get_index_template.go +++ b/esapi/api.indices.get_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -225,7 +225,7 @@ func (f IndicesGetIndexTemplate) WithLocal(v bool) func(*IndicesGetIndexTemplate } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. 
func (f IndicesGetIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesGetIndexTemplateRequest) { return func(r *IndicesGetIndexTemplateRequest) { r.MasterTimeout = v diff --git a/esapi/api.indices.get_mapping.go b/esapi/api.indices.get_mapping.go index 762d143f43..0270d5eca4 100644 --- a/esapi/api.indices.get_mapping.go +++ b/esapi/api.indices.get_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_migrate_reindex_status.go b/esapi/api.indices.get_migrate_reindex_status.go new file mode 100644 index 0000000000..e7d3abb984 --- /dev/null +++ b/esapi/api.indices.get_migrate_reindex_status.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetMigrateReindexStatusFunc(t Transport) IndicesGetMigrateReindexStatus { + return func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) { + var r = IndicesGetMigrateReindexStatusRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetMigrateReindexStatus this API returns the status of a migration reindex attempt for a data stream or index +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-status-api.html. +type IndicesGetMigrateReindexStatus func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) + +// IndicesGetMigrateReindexStatusRequest configures the Indices Get Migrate Reindex Status API request. +type IndicesGetMigrateReindexStatusRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetMigrateReindexStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_status")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); 
ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetMigrateReindexStatus) WithContext(v context.Context) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetMigrateReindexStatus) WithPretty() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetMigrateReindexStatus) WithHuman() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetMigrateReindexStatus) WithErrorTrace() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetMigrateReindexStatus) WithFilterPath(v ...string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesGetMigrateReindexStatus) WithHeader(h map[string]string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetMigrateReindexStatus) WithOpaqueID(s string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.get_settings.go b/esapi/api.indices.get_settings.go index 08c36a63cc..b53eefaad6 100644 --- a/esapi/api.indices.get_settings.go +++ b/esapi/api.indices.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_template.go b/esapi/api.indices.get_template.go index 08772e5bfe..29da37bcb4 100644 --- a/esapi/api.indices.get_template.go +++ b/esapi/api.indices.get_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -213,7 +213,7 @@ func (f IndicesGetTemplate) WithLocal(v bool) func(*IndicesGetTemplateRequest) { } } -// WithMasterTimeout - explicit operation timeout for connection to master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. 
func (f IndicesGetTemplate) WithMasterTimeout(v time.Duration) func(*IndicesGetTemplateRequest) { return func(r *IndicesGetTemplateRequest) { r.MasterTimeout = v diff --git a/esapi/api.indices.migrate_reindex.go b/esapi/api.indices.migrate_reindex.go new file mode 100644 index 0000000000..5c5c9adba0 --- /dev/null +++ b/esapi/api.indices.migrate_reindex.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesMigrateReindexFunc(t Transport) IndicesMigrateReindex { + return func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) { + var r = IndicesMigrateReindexRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesMigrateReindex this API reindexes all legacy backing indices for a data stream. It does this in a persistent task. 
The persistent task id is returned immediately, and the reindexing work is completed in that task +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-api.html. +type IndicesMigrateReindex func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) + +// IndicesMigrateReindexRequest configures the Indices Migrate Reindex API request. +type IndicesMigrateReindexRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_migration/reindex")) + path.WriteString("http://") + path.WriteString("/_migration/reindex") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv 
:= range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_reindex", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesMigrateReindex) WithContext(v context.Context) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesMigrateReindex) WithPretty() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesMigrateReindex) WithHuman() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesMigrateReindex) WithErrorTrace() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f IndicesMigrateReindex) WithFilterPath(v ...string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesMigrateReindex) WithHeader(h map[string]string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesMigrateReindex) WithOpaqueID(s string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.modify_data_stream.go b/esapi/api.indices.modify_data_stream.go index e60f2e2cde..b646692a16 100644 --- a/esapi/api.indices.modify_data_stream.go +++ b/esapi/api.indices.modify_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.open.go b/esapi/api.indices.open.go index 30120936f8..09463da85d 100644 --- a/esapi/api.indices.open.go +++ b/esapi/api.indices.open.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_alias.go b/esapi/api.indices.put_alias.go index 064779b444..61a9a693e1 100644 --- a/esapi/api.indices.put_alias.go +++ b/esapi/api.indices.put_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_data_lifecycle.go b/esapi/api.indices.put_data_lifecycle.go index 693f71535c..cc6b27d016 100644 --- a/esapi/api.indices.put_data_lifecycle.go +++ b/esapi/api.indices.put_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_index_template.go b/esapi/api.indices.put_index_template.go index ecc4fbf408..db597988cc 100644 --- a/esapi/api.indices.put_index_template.go +++ b/esapi/api.indices.put_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_mapping.go b/esapi/api.indices.put_mapping.go index 93c88c1571..9157a1421c 100644 --- a/esapi/api.indices.put_mapping.go +++ b/esapi/api.indices.put_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_settings.go b/esapi/api.indices.put_settings.go index be486699ff..54c87be23b 100644 --- a/esapi/api.indices.put_settings.go +++ b/esapi/api.indices.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_template.go b/esapi/api.indices.put_template.go index 9a074077a5..f61bcfe962 100644 --- a/esapi/api.indices.put_template.go +++ b/esapi/api.indices.put_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.recovery.go b/esapi/api.indices.recovery.go index 02a157974e..d09a3678a1 100644 --- a/esapi/api.indices.recovery.go +++ b/esapi/api.indices.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.refresh.go b/esapi/api.indices.refresh.go index 611e283f7c..d500bfb8bb 100644 --- a/esapi/api.indices.refresh.go +++ b/esapi/api.indices.refresh.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.resolve_cluster.go b/esapi/api.indices.resolve_cluster.go index 582e1275d1..ca923d2482 100644 --- a/esapi/api.indices.resolve_cluster.go +++ b/esapi/api.indices.resolve_cluster.go @@ -15,21 +15,21 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi import ( "context" - "errors" "net/http" "strconv" "strings" + "time" ) func newIndicesResolveClusterFunc(t Transport) IndicesResolveCluster { - return func(name []string, o ...func(*IndicesResolveClusterRequest)) (*Response, error) { - var r = IndicesResolveClusterRequest{Name: name} + return func(o ...func(*IndicesResolveClusterRequest)) (*Response, error) { + var r = IndicesResolveClusterRequest{} for _, f := range o { f(&r) } @@ -44,10 +44,10 @@ func newIndicesResolveClusterFunc(t Transport) IndicesResolveCluster { // ----- API Definition ------------------------------------------------------- -// IndicesResolveCluster resolves the specified index expressions to return information about each cluster, including the local cluster, if included. +// IndicesResolveCluster resolves the specified index expressions to return information about each cluster. If no index expression is provided, this endpoint will return information about all the remote clusters that are configured on the local cluster. // // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html. -type IndicesResolveCluster func(name []string, o ...func(*IndicesResolveClusterRequest)) (*Response, error) +type IndicesResolveCluster func(o ...func(*IndicesResolveClusterRequest)) (*Response, error) // IndicesResolveClusterRequest configures the Indices Resolve Cluster API request. 
type IndicesResolveClusterRequest struct { @@ -57,6 +57,7 @@ type IndicesResolveClusterRequest struct { ExpandWildcards string IgnoreThrottled *bool IgnoreUnavailable *bool + Timeout time.Duration Pretty bool Human bool @@ -89,20 +90,18 @@ func (r IndicesResolveClusterRequest) Do(providedCtx context.Context, transport method = "GET" - if len(r.Name) == 0 { - return nil, errors.New("name is required and cannot be nil or empty") - } - path.Grow(7 + 1 + len("_resolve") + 1 + len("cluster") + 1 + len(strings.Join(r.Name, ","))) path.WriteString("http://") path.WriteString("/") path.WriteString("_resolve") path.WriteString("/") path.WriteString("cluster") - path.WriteString("/") - path.WriteString(strings.Join(r.Name, ",")) - if instrument, ok := r.instrument.(Instrumentation); ok { - instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } } params = make(map[string]string) @@ -123,6 +122,10 @@ func (r IndicesResolveClusterRequest) Do(providedCtx context.Context, transport params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -201,34 +204,48 @@ func (f IndicesResolveCluster) WithContext(v context.Context) func(*IndicesResol } } -// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +// WithName - a list of cluster:index names or wildcard expressions. 
+func (f IndicesResolveCluster) WithName(v ...string) func(*IndicesResolveClusterRequest) { + return func(r *IndicesResolveClusterRequest) { + r.Name = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). only allowed when providing an index expression.. func (f IndicesResolveCluster) WithAllowNoIndices(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.AllowNoIndices = &v } } -// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). only allowed when providing an index expression.. func (f IndicesResolveCluster) WithExpandWildcards(v string) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.ExpandWildcards = v } } -// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. only allowed when providing an index expression.. func (f IndicesResolveCluster) WithIgnoreThrottled(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.IgnoreThrottled = &v } } -// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). only allowed when providing an index expression.. 
func (f IndicesResolveCluster) WithIgnoreUnavailable(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.IgnoreUnavailable = &v } } +// WithTimeout - the maximum time to wait for remote clusters to respond. +func (f IndicesResolveCluster) WithTimeout(v time.Duration) func(*IndicesResolveClusterRequest) { + return func(r *IndicesResolveClusterRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f IndicesResolveCluster) WithPretty() func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { diff --git a/esapi/api.indices.resolve_index.go b/esapi/api.indices.resolve_index.go index 870bfbef81..c3cd2b3e09 100644 --- a/esapi/api.indices.resolve_index.go +++ b/esapi/api.indices.resolve_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.rollover.go b/esapi/api.indices.rollover.go index c75eebe9f3..f145c31b74 100644 --- a/esapi/api.indices.rollover.go +++ b/esapi/api.indices.rollover.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -61,7 +61,6 @@ type IndicesRolloverRequest struct { DryRun *bool Lazy *bool MasterTimeout time.Duration - TargetFailureStore *bool Timeout time.Duration WaitForActiveShards string @@ -127,10 +126,6 @@ func (r IndicesRolloverRequest) Do(providedCtx context.Context, transport Transp params["master_timeout"] = formatDuration(r.MasterTimeout) } - if r.TargetFailureStore != nil { - params["target_failure_store"] = strconv.FormatBool(*r.TargetFailureStore) - } - if r.Timeout != 0 { params["timeout"] = formatDuration(r.Timeout) } @@ -259,13 +254,6 @@ func (f IndicesRollover) WithMasterTimeout(v time.Duration) func(*IndicesRollove } } -// WithTargetFailureStore - if set to true, the rollover action will be applied on the failure store of the data stream.. -func (f IndicesRollover) WithTargetFailureStore(v bool) func(*IndicesRolloverRequest) { - return func(r *IndicesRolloverRequest) { - r.TargetFailureStore = &v - } -} - // WithTimeout - explicit operation timeout. func (f IndicesRollover) WithTimeout(v time.Duration) func(*IndicesRolloverRequest) { return func(r *IndicesRolloverRequest) { diff --git a/esapi/api.indices.segments.go b/esapi/api.indices.segments.go index ecfe009bbe..883998bb4b 100644 --- a/esapi/api.indices.segments.go +++ b/esapi/api.indices.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -55,7 +55,6 @@ type IndicesSegmentsRequest struct { AllowNoIndices *bool ExpandWildcards string IgnoreUnavailable *bool - Verbose *bool Pretty bool Human bool @@ -114,10 +113,6 @@ func (r IndicesSegmentsRequest) Do(providedCtx context.Context, transport Transp params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } - if r.Verbose != nil { - params["verbose"] = strconv.FormatBool(*r.Verbose) - } - if r.Pretty { params["pretty"] = "true" } @@ -224,13 +219,6 @@ func (f IndicesSegments) WithIgnoreUnavailable(v bool) func(*IndicesSegmentsRequ } } -// WithVerbose - includes detailed memory usage by lucene.. -func (f IndicesSegments) WithVerbose(v bool) func(*IndicesSegmentsRequest) { - return func(r *IndicesSegmentsRequest) { - r.Verbose = &v - } -} - // WithPretty makes the response body pretty-printed. func (f IndicesSegments) WithPretty() func(*IndicesSegmentsRequest) { return func(r *IndicesSegmentsRequest) { diff --git a/esapi/api.indices.shard_stores.go b/esapi/api.indices.shard_stores.go index 933a70dbbf..923ce1ca9a 100644 --- a/esapi/api.indices.shard_stores.go +++ b/esapi/api.indices.shard_stores.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.shrink.go b/esapi/api.indices.shrink.go index cc9d992482..705854f1f7 100644 --- a/esapi/api.indices.shrink.go +++ b/esapi/api.indices.shrink.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.simulate_index_template.go b/esapi/api.indices.simulate_index_template.go index b7bf01084a..5edaa458d4 100644 --- a/esapi/api.indices.simulate_index_template.go +++ b/esapi/api.indices.simulate_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.simulate_template.go b/esapi/api.indices.simulate_template.go index fcb6e977de..0270c13dce 100644 --- a/esapi/api.indices.simulate_template.go +++ b/esapi/api.indices.simulate_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.split.go b/esapi/api.indices.split.go index 27a59c0bf6..d77be764bd 100644 --- a/esapi/api.indices.split.go +++ b/esapi/api.indices.split.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.stats.go b/esapi/api.indices.stats.go index e48590147e..ed31e12477 100644 --- a/esapi/api.indices.stats.go +++ b/esapi/api.indices.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.update_aliases.go b/esapi/api.indices.update_aliases.go index 8a62abdf2c..ddbd4f40ea 100644 --- a/esapi/api.indices.update_aliases.go +++ b/esapi/api.indices.update_aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.validate_query.go b/esapi/api.indices.validate_query.go index 2410ac43c3..11948d15d8 100644 --- a/esapi/api.indices.validate_query.go +++ b/esapi/api.indices.validate_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.inference.chat_completion_unified.go b/esapi/api.inference.chat_completion_unified.go new file mode 100644 index 0000000000..64b79a5949 --- /dev/null +++ b/esapi/api.inference.chat_completion_unified.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceChatCompletionUnifiedFunc(t Transport) InferenceChatCompletionUnified { + return func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) { + var r = InferenceChatCompletionUnifiedRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceChatCompletionUnified perform chat completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/chat-completion-inference.html. +type InferenceChatCompletionUnified func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) + +// InferenceChatCompletionUnifiedRequest configures the Inference Chat Completion Unified API request. +type InferenceChatCompletionUnifiedRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceChatCompletionUnifiedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("chat_completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok 
{ + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceChatCompletionUnified) WithContext(v context.Context) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceChatCompletionUnified) WithBody(v io.Reader) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceChatCompletionUnified) WithPretty() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceChatCompletionUnified) WithHuman() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceChatCompletionUnified) WithErrorTrace() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f InferenceChatCompletionUnified) WithFilterPath(v ...string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceChatCompletionUnified) WithHeader(h map[string]string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceChatCompletionUnified) WithOpaqueID(s string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.completion.go b/esapi/api.inference.completion.go new file mode 100644 index 0000000000..26ae2f5cb4 --- /dev/null +++ b/esapi/api.inference.completion.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceCompletionFunc(t Transport) InferenceCompletion { + return func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) { + var r = InferenceCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceCompletion perform completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceCompletion func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) + +// InferenceCompletionRequest configures the Inference Completion API request. +type InferenceCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, 
"inference.completion", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceCompletion) WithContext(v context.Context) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceCompletion) WithBody(v io.Reader) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceCompletion) WithPretty() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceCompletion) WithHuman() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceCompletion) WithErrorTrace() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceCompletion) WithFilterPath(v ...string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceCompletion) WithHeader(h map[string]string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceCompletion) WithOpaqueID(s string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.delete.go b/esapi/api.inference.delete.go index 73828f2513..959828a0d0 100644 --- a/esapi/api.inference.delete.go +++ b/esapi/api.inference.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -45,8 +45,6 @@ func newInferenceDeleteFunc(t Transport) InferenceDelete { // InferenceDelete delete an inference endpoint // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html. type InferenceDelete func(inference_id string, o ...func(*InferenceDeleteRequest)) (*Response, error) diff --git a/esapi/api.inference.get.go b/esapi/api.inference.get.go index ceb1673d7f..be005947e9 100644 --- a/esapi/api.inference.get.go +++ b/esapi/api.inference.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -44,8 +44,6 @@ func newInferenceGetFunc(t Transport) InferenceGet { // InferenceGet get an inference endpoint // -// This API is experimental. 
-// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html. type InferenceGet func(o ...func(*InferenceGetRequest)) (*Response, error) diff --git a/esapi/api.inference.put.go b/esapi/api.inference.put.go index 7193a51065..640238893e 100644 --- a/esapi/api.inference.put.go +++ b/esapi/api.inference.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -45,8 +45,6 @@ func newInferencePutFunc(t Transport) InferencePut { // InferencePut configure an inference endpoint for use in the Inference API // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html. type InferencePut func(inference_id string, o ...func(*InferencePutRequest)) (*Response, error) diff --git a/esapi/api.inference.rerank.go b/esapi/api.inference.rerank.go new file mode 100644 index 0000000000..04bc219e2b --- /dev/null +++ b/esapi/api.inference.rerank.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceRerankFunc(t Transport) InferenceRerank { + return func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) { + var r = InferenceRerankRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceRerank perform reranking inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceRerank func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) + +// InferenceRerankRequest configures the Inference Rerank API request. +type InferenceRerankRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceRerankRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("rerank") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", 
r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceRerank) WithContext(v context.Context) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceRerank) WithBody(v io.Reader) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceRerank) WithPretty() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceRerank) WithHuman() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceRerank) WithErrorTrace() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceRerank) WithFilterPath(v ...string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceRerank) WithHeader(h map[string]string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceRerank) WithOpaqueID(s string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.sparse_embedding.go b/esapi/api.inference.sparse_embedding.go new file mode 100644 index 0000000000..5770947aae --- /dev/null +++ b/esapi/api.inference.sparse_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceSparseEmbeddingFunc(t Transport) InferenceSparseEmbedding { + return func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) { + var r = InferenceSparseEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceSparseEmbedding perform sparse embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceSparseEmbedding func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) + +// InferenceSparseEmbeddingRequest configures the Inference Sparse Embedding API request. +type InferenceSparseEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceSparseEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("sparse_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := 
instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceSparseEmbedding) WithContext(v context.Context) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceSparseEmbedding) WithBody(v io.Reader) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceSparseEmbedding) WithPretty() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceSparseEmbedding) WithHuman() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceSparseEmbedding) WithErrorTrace() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceSparseEmbedding) WithFilterPath(v ...string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceSparseEmbedding) WithHeader(h map[string]string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceSparseEmbedding) WithOpaqueID(s string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.stream_completion.go b/esapi/api.inference.stream_completion.go new file mode 100644 index 0000000000..156f458cbd --- /dev/null +++ b/esapi/api.inference.stream_completion.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceStreamCompletionFunc(t Transport) InferenceStreamCompletion { + return func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) { + var r = InferenceStreamCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceStreamCompletion perform streaming completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html. +type InferenceStreamCompletion func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) + +// InferenceStreamCompletionRequest configures the Inference Stream Completion API request. +type InferenceStreamCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceStreamCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceStreamCompletion) WithContext(v context.Context) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceStreamCompletion) WithBody(v io.Reader) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceStreamCompletion) WithPretty() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceStreamCompletion) WithHuman() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceStreamCompletion) WithErrorTrace() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f InferenceStreamCompletion) WithFilterPath(v ...string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceStreamCompletion) WithHeader(h map[string]string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceStreamCompletion) WithOpaqueID(s string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.text_embedding.go b/esapi/api.inference.text_embedding.go new file mode 100644 index 0000000000..c09013fdc2 --- /dev/null +++ b/esapi/api.inference.text_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceTextEmbeddingFunc(t Transport) InferenceTextEmbedding { + return func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) { + var r = InferenceTextEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceTextEmbedding perform text embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceTextEmbedding func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) + +// InferenceTextEmbeddingRequest configures the Inference Text Embedding API request. +type InferenceTextEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceTextEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.text_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("text_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("text_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := 
instrument.RecordRequestBody(ctx, "inference.text_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceTextEmbedding) WithContext(v context.Context) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceTextEmbedding) WithBody(v io.Reader) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceTextEmbedding) WithPretty() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceTextEmbedding) WithHuman() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceTextEmbedding) WithErrorTrace() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceTextEmbedding) WithFilterPath(v ...string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceTextEmbedding) WithHeader(h map[string]string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceTextEmbedding) WithOpaqueID(s string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.update.go b/esapi/api.inference.update.go new file mode 100644 index 0000000000..9b12cef588 --- /dev/null +++ b/esapi/api.inference.update.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceUpdateFunc(t Transport) InferenceUpdate { + return func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) { + var r = InferenceUpdateRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceUpdate update inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html. +type InferenceUpdate func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) + +// InferenceUpdateRequest configures the Inference Update API request. +type InferenceUpdateRequest struct { + Body io.Reader + + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceUpdateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if 
ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceUpdate) WithContext(v context.Context) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferenceUpdate) WithBody(v io.Reader) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Body = v + } +} + +// WithTaskType - the task type. +func (f InferenceUpdate) WithTaskType(v string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceUpdate) WithPretty() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceUpdate) WithHuman() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceUpdate) WithErrorTrace() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f InferenceUpdate) WithFilterPath(v ...string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceUpdate) WithHeader(h map[string]string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceUpdate) WithOpaqueID(s string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.info.go b/esapi/api.info.go index 5ade00d083..e92bb85bed 100644 --- a/esapi/api.info.go +++ b/esapi/api.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.delete_geoip_database.go b/esapi/api.ingest.delete_geoip_database.go index 0cd85dcf6e..96e31112fb 100644 --- a/esapi/api.ingest.delete_geoip_database.go +++ b/esapi/api.ingest.delete_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "errors" "net/http" "strings" + "time" ) func newIngestDeleteGeoipDatabaseFunc(t Transport) IngestDeleteGeoipDatabase { @@ -52,6 +53,9 @@ type IngestDeleteGeoipDatabase func(id []string, o ...func(*IngestDeleteGeoipDat type IngestDeleteGeoipDatabaseRequest struct { DocumentID []string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -103,6 +107,14 @@ func (r IngestDeleteGeoipDatabaseRequest) Do(providedCtx context.Context, transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -181,6 +193,20 @@ func (f IngestDeleteGeoipDatabase) WithContext(v context.Context) func(*IngestDe } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestDeleteGeoipDatabase) WithMasterTimeout(v time.Duration) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestDeleteGeoipDatabase) WithTimeout(v time.Duration) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f IngestDeleteGeoipDatabase) WithPretty() func(*IngestDeleteGeoipDatabaseRequest) { return func(r *IngestDeleteGeoipDatabaseRequest) { diff --git a/esapi/api.ingest.delete_ip_location_database.go b/esapi/api.ingest.delete_ip_location_database.go index 434d4d66eb..837a248f4f 100644 --- a/esapi/api.ingest.delete_ip_location_database.go +++ b/esapi/api.ingest.delete_ip_location_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "errors" "net/http" "strings" + "time" ) func newIngestDeleteIPLocationDatabaseFunc(t Transport) IngestDeleteIPLocationDatabase { @@ -52,6 +53,9 @@ type IngestDeleteIPLocationDatabase func(id []string, o ...func(*IngestDeleteIPL type IngestDeleteIPLocationDatabaseRequest struct { DocumentID []string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -103,6 +107,14 @@ func (r IngestDeleteIPLocationDatabaseRequest) Do(providedCtx context.Context, t params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -181,6 +193,20 @@ func (f IngestDeleteIPLocationDatabase) WithContext(v context.Context) func(*Ing } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestDeleteIPLocationDatabase) WithMasterTimeout(v time.Duration) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f IngestDeleteIPLocationDatabase) WithTimeout(v time.Duration) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f IngestDeleteIPLocationDatabase) WithPretty() func(*IngestDeleteIPLocationDatabaseRequest) { return func(r *IngestDeleteIPLocationDatabaseRequest) { diff --git a/esapi/api.ingest.delete_pipeline.go b/esapi/api.ingest.delete_pipeline.go index 77dd8c0489..2de01a292f 100644 --- a/esapi/api.ingest.delete_pipeline.go +++ b/esapi/api.ingest.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.geo_ip_stats.go b/esapi/api.ingest.geo_ip_stats.go index 9165dec507..9b3bd77ee7 100644 --- a/esapi/api.ingest.geo_ip_stats.go +++ b/esapi/api.ingest.geo_ip_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.get_geoip_database.go b/esapi/api.ingest.get_geoip_database.go index a1bc588800..7972c17511 100644 --- a/esapi/api.ingest.get_geoip_database.go +++ b/esapi/api.ingest.get_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.get_ip_location_database.go b/esapi/api.ingest.get_ip_location_database.go index fdb37096d4..4cc728c553 100644 --- a/esapi/api.ingest.get_ip_location_database.go +++ b/esapi/api.ingest.get_ip_location_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.get_pipeline.go b/esapi/api.ingest.get_pipeline.go index 73e9727661..3dca038382 100644 --- a/esapi/api.ingest.get_pipeline.go +++ b/esapi/api.ingest.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.processor_grok.go b/esapi/api.ingest.processor_grok.go index 809fef64c5..7c30cd80f3 100644 --- a/esapi/api.ingest.processor_grok.go +++ b/esapi/api.ingest.processor_grok.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.put_geoip_database.go b/esapi/api.ingest.put_geoip_database.go index 0c0c5ad4cc..7174117e96 100644 --- a/esapi/api.ingest.put_geoip_database.go +++ b/esapi/api.ingest.put_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newIngestPutGeoipDatabaseFunc(t Transport) IngestPutGeoipDatabase { @@ -54,6 +55,9 @@ type IngestPutGeoipDatabaseRequest struct { Body io.Reader + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -101,6 +105,14 @@ func (r IngestPutGeoipDatabaseRequest) Do(providedCtx context.Context, transport params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -186,6 +198,20 @@ func (f IngestPutGeoipDatabase) WithContext(v context.Context) func(*IngestPutGe } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestPutGeoipDatabase) WithMasterTimeout(v time.Duration) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestPutGeoipDatabase) WithTimeout(v time.Duration) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f IngestPutGeoipDatabase) WithPretty() func(*IngestPutGeoipDatabaseRequest) { return func(r *IngestPutGeoipDatabaseRequest) { diff --git a/esapi/api.ingest.put_ip_location_database.go b/esapi/api.ingest.put_ip_location_database.go index 0c3fac6fb4..af6e9befd5 100644 --- a/esapi/api.ingest.put_ip_location_database.go +++ b/esapi/api.ingest.put_ip_location_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newIngestPutIPLocationDatabaseFunc(t Transport) IngestPutIPLocationDatabase { @@ -54,6 +55,9 @@ type IngestPutIPLocationDatabaseRequest struct { Body io.Reader + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -101,6 +105,14 @@ func (r IngestPutIPLocationDatabaseRequest) Do(providedCtx context.Context, tran params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -186,6 +198,20 @@ func (f IngestPutIPLocationDatabase) WithContext(v context.Context) func(*Ingest } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestPutIPLocationDatabase) WithMasterTimeout(v time.Duration) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestPutIPLocationDatabase) WithTimeout(v time.Duration) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f IngestPutIPLocationDatabase) WithPretty() func(*IngestPutIPLocationDatabaseRequest) { return func(r *IngestPutIPLocationDatabaseRequest) { diff --git a/esapi/api.ingest.put_pipeline.go b/esapi/api.ingest.put_pipeline.go index 833b2cb50b..0231bef7a4 100644 --- a/esapi/api.ingest.put_pipeline.go +++ b/esapi/api.ingest.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.simulate.go b/esapi/api.ingest.simulate.go index 1ca53dc154..a1d0255191 100644 --- a/esapi/api.ingest.simulate.go +++ b/esapi/api.ingest.simulate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.knn_search.go b/esapi/api.knn_search.go index 71c2980cb8..e345d0514e 100644 --- a/esapi/api.knn_search.go +++ b/esapi/api.knn_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.mget.go b/esapi/api.mget.go index 6a6990631a..1db1144779 100644 --- a/esapi/api.mget.go +++ b/esapi/api.mget.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.msearch.go b/esapi/api.msearch.go index 60308eb986..b171cb7a5c 100644 --- a/esapi/api.msearch.go +++ b/esapi/api.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.msearch_template.go b/esapi/api.msearch_template.go index 5a5cdb765d..74b0b6deac 100644 --- a/esapi/api.msearch_template.go +++ b/esapi/api.msearch_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.mtermvectors.go b/esapi/api.mtermvectors.go index 9bb3f2eb45..0d5f4d5d40 100644 --- a/esapi/api.mtermvectors.go +++ b/esapi/api.mtermvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.clear_repositories_metering_archive.go b/esapi/api.nodes.clear_repositories_metering_archive.go index f9e4551e7f..53381ad3b8 100644 --- a/esapi/api.nodes.clear_repositories_metering_archive.go +++ b/esapi/api.nodes.clear_repositories_metering_archive.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.get_repositories_metering_info.go b/esapi/api.nodes.get_repositories_metering_info.go index 2a6e7dc3d2..46e559b654 100644 --- a/esapi/api.nodes.get_repositories_metering_info.go +++ b/esapi/api.nodes.get_repositories_metering_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.hot_threads.go b/esapi/api.nodes.hot_threads.go index d2092113df..7ca44a2552 100644 --- a/esapi/api.nodes.hot_threads.go +++ b/esapi/api.nodes.hot_threads.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.info.go b/esapi/api.nodes.info.go index 0bf18f5e02..7567bab275 100644 --- a/esapi/api.nodes.info.go +++ b/esapi/api.nodes.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.reload_secure_settings.go b/esapi/api.nodes.reload_secure_settings.go index 591c966cfc..555bedca8f 100644 --- a/esapi/api.nodes.reload_secure_settings.go +++ b/esapi/api.nodes.reload_secure_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.stats.go b/esapi/api.nodes.stats.go index a256c094a6..05cf00a200 100644 --- a/esapi/api.nodes.stats.go +++ b/esapi/api.nodes.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.usage.go b/esapi/api.nodes.usage.go index 0a589f062d..d23c1bc624 100644 --- a/esapi/api.nodes.usage.go +++ b/esapi/api.nodes.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.ping.go b/esapi/api.ping.go index 1e4397a5a2..dd0659ba96 100644 --- a/esapi/api.ping.go +++ b/esapi/api.ping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.stacktraces.go b/esapi/api.profiling.stacktraces.go index 9cdafeec3a..424829ae72 100644 --- a/esapi/api.profiling.stacktraces.go +++ b/esapi/api.profiling.stacktraces.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.status.go b/esapi/api.profiling.status.go index 9441d9b872..d4034f200f 100644 --- a/esapi/api.profiling.status.go +++ b/esapi/api.profiling.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.topn_functions.go b/esapi/api.profiling.topn_functions.go index 3287a1e994..5a1098f78c 100644 --- a/esapi/api.profiling.topn_functions.go +++ b/esapi/api.profiling.topn_functions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.put_script.go b/esapi/api.put_script.go index bc6ba719af..1d1e72435b 100644 --- a/esapi/api.put_script.go +++ b/esapi/api.put_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.delete_rule.go b/esapi/api.query_rules.delete_rule.go index ab1dc71fec..b50aa9cecf 100644 --- a/esapi/api.query_rules.delete_rule.go +++ b/esapi/api.query_rules.delete_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.delete_ruleset.go b/esapi/api.query_rules.delete_ruleset.go index 45dd5843ee..a853add793 100644 --- a/esapi/api.query_rules.delete_ruleset.go +++ b/esapi/api.query_rules.delete_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.get_rule.go b/esapi/api.query_rules.get_rule.go index fb5518a615..ba423669f8 100644 --- a/esapi/api.query_rules.get_rule.go +++ b/esapi/api.query_rules.get_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.get_ruleset.go b/esapi/api.query_rules.get_ruleset.go index 8ac5f6b6d2..218f512d64 100644 --- a/esapi/api.query_rules.get_ruleset.go +++ b/esapi/api.query_rules.get_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.list_rulesets.go b/esapi/api.query_rules.list_rulesets.go index d21e4d6cae..de150ca761 100644 --- a/esapi/api.query_rules.list_rulesets.go +++ b/esapi/api.query_rules.list_rulesets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.put_rule.go b/esapi/api.query_rules.put_rule.go index 3492271068..f4b33ca6fb 100644 --- a/esapi/api.query_rules.put_rule.go +++ b/esapi/api.query_rules.put_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.put_ruleset.go b/esapi/api.query_rules.put_ruleset.go index a171e4cca7..7951273e14 100644 --- a/esapi/api.query_rules.put_ruleset.go +++ b/esapi/api.query_rules.put_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.test.go b/esapi/api.query_rules.test.go index 573adf9184..19e0dcc019 100644 --- a/esapi/api.query_rules.test.go +++ b/esapi/api.query_rules.test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.rank_eval.go b/esapi/api.rank_eval.go index 5aef194437..b7a10066a7 100644 --- a/esapi/api.rank_eval.go +++ b/esapi/api.rank_eval.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.reindex.go b/esapi/api.reindex.go index 9a8e9d8eb5..38bd48c640 100644 --- a/esapi/api.reindex.go +++ b/esapi/api.reindex.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.reindex_rethrottle.go b/esapi/api.reindex_rethrottle.go index 7d36716df0..1763d04daa 100644 --- a/esapi/api.reindex_rethrottle.go +++ b/esapi/api.reindex_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.render_search_template.go b/esapi/api.render_search_template.go index 0d6b0ae027..e0d1824425 100644 --- a/esapi/api.render_search_template.go +++ b/esapi/api.render_search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.scripts_painless_execute.go b/esapi/api.scripts_painless_execute.go index bef9a22ffb..fb767621e9 100644 --- a/esapi/api.scripts_painless_execute.go +++ b/esapi/api.scripts_painless_execute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.scroll.go b/esapi/api.scroll.go index 52cc8aca0d..0b9459ee50 100644 --- a/esapi/api.scroll.go +++ b/esapi/api.scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search.go b/esapi/api.search.go index 3c93cd5e58..ac3ea29237 100644 --- a/esapi/api.search.go +++ b/esapi/api.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -75,7 +75,6 @@ type SearchRequest struct { IncludeNamedQueriesScore *bool Lenient *bool MaxConcurrentShardRequests *int - MinCompatibleShardNode string Preference string PreFilterShardSize *int Query string @@ -220,10 +219,6 @@ func (r SearchRequest) Do(providedCtx context.Context, transport Transport) (*Re params["max_concurrent_shard_requests"] = strconv.FormatInt(int64(*r.MaxConcurrentShardRequests), 10) } - if r.MinCompatibleShardNode != "" { - params["min_compatible_shard_node"] = r.MinCompatibleShardNode - } - if r.Preference != "" { params["preference"] = r.Preference } @@ -553,13 +548,6 @@ func (f Search) WithMaxConcurrentShardRequests(v int) func(*SearchRequest) { } } -// WithMinCompatibleShardNode - the minimum compatible version that all shards involved in search should have for this request to be successful. -func (f Search) WithMinCompatibleShardNode(v string) func(*SearchRequest) { - return func(r *SearchRequest) { - r.MinCompatibleShardNode = v - } -} - // WithPreference - specify the node or shard the operation should be performed on (default: random). 
func (f Search) WithPreference(v string) func(*SearchRequest) { return func(r *SearchRequest) { diff --git a/esapi/api.search_application.delete.go b/esapi/api.search_application.delete.go index a36a2cd170..bb9db6e2a0 100644 --- a/esapi/api.search_application.delete.go +++ b/esapi/api.search_application.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.delete_behavioral_analytics.go b/esapi/api.search_application.delete_behavioral_analytics.go index 61a8c9055b..f136b860d0 100644 --- a/esapi/api.search_application.delete_behavioral_analytics.go +++ b/esapi/api.search_application.delete_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.get.go b/esapi/api.search_application.get.go index 478c96e5f0..371196ee23 100644 --- a/esapi/api.search_application.get.go +++ b/esapi/api.search_application.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.get_behavioral_analytics.go b/esapi/api.search_application.get_behavioral_analytics.go index da881ea819..bea60c5c83 100644 --- a/esapi/api.search_application.get_behavioral_analytics.go +++ b/esapi/api.search_application.get_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.list.go b/esapi/api.search_application.list.go index 8caa449804..699b402b88 100644 --- a/esapi/api.search_application.list.go +++ b/esapi/api.search_application.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.post_behavioral_analytics_event.go b/esapi/api.search_application.post_behavioral_analytics_event.go index 1f58229eab..727382cb54 100644 --- a/esapi/api.search_application.post_behavioral_analytics_event.go +++ b/esapi/api.search_application.post_behavioral_analytics_event.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.put.go b/esapi/api.search_application.put.go index 1f100d03a8..7438a690cd 100644 --- a/esapi/api.search_application.put.go +++ b/esapi/api.search_application.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.put_behavioral_analytics.go b/esapi/api.search_application.put_behavioral_analytics.go index 9b7ff92754..6f78d7a277 100644 --- a/esapi/api.search_application.put_behavioral_analytics.go +++ b/esapi/api.search_application.put_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.render_query.go b/esapi/api.search_application.render_query.go index 817eca1b16..301480a859 100644 --- a/esapi/api.search_application.render_query.go +++ b/esapi/api.search_application.render_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.search.go b/esapi/api.search_application.search.go index 4b19fa834c..7c98e480e7 100644 --- a/esapi/api.search_application.search.go +++ b/esapi/api.search_application.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_mvt.go b/esapi/api.search_mvt.go index 055fe7ba9b..d027b67606 100644 --- a/esapi/api.search_mvt.go +++ b/esapi/api.search_mvt.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_shards.go b/esapi/api.search_shards.go index 8e2dd573ff..b7f237d5b7 100644 --- a/esapi/api.search_shards.go +++ b/esapi/api.search_shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_template.go b/esapi/api.search_template.go index b2beeaa0f1..e1955fd388 100644 --- a/esapi/api.search_template.go +++ b/esapi/api.search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.shutdown.delete_node.go b/esapi/api.shutdown.delete_node.go index 4e7306a09c..2c7e3e5ed7 100644 --- a/esapi/api.shutdown.delete_node.go +++ b/esapi/api.shutdown.delete_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newShutdownDeleteNodeFunc(t Transport) ShutdownDeleteNode { @@ -51,6 +52,9 @@ type ShutdownDeleteNode func(node_id string, o ...func(*ShutdownDeleteNodeReques type ShutdownDeleteNodeRequest struct { NodeID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -96,6 +100,14 @@ func (r ShutdownDeleteNodeRequest) Do(providedCtx context.Context, transport Tra params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +186,20 @@ func (f ShutdownDeleteNode) WithContext(v context.Context) func(*ShutdownDeleteN } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownDeleteNode) WithMasterTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownDeleteNode) WithTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ShutdownDeleteNode) WithPretty() func(*ShutdownDeleteNodeRequest) { return func(r *ShutdownDeleteNodeRequest) { diff --git a/esapi/api.shutdown.get_node.go b/esapi/api.shutdown.get_node.go index 10706afac3..e27a53fd4b 100644 --- a/esapi/api.shutdown.get_node.go +++ b/esapi/api.shutdown.get_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.shutdown.put_node.go b/esapi/api.shutdown.put_node.go index 779427b6d6..852b1aa5c5 100644 --- a/esapi/api.shutdown.put_node.go +++ b/esapi/api.shutdown.put_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newShutdownPutNodeFunc(t Transport) ShutdownPutNode { @@ -54,6 +55,9 @@ type ShutdownPutNodeRequest struct { NodeID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -99,6 +103,14 @@ func (r ShutdownPutNodeRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -184,6 +196,20 @@ func (f ShutdownPutNode) WithContext(v context.Context) func(*ShutdownPutNodeReq } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownPutNode) WithMasterTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownPutNode) WithTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f ShutdownPutNode) WithPretty() func(*ShutdownPutNodeRequest) { return func(r *ShutdownPutNodeRequest) { diff --git a/esapi/api.simulate.ingest.go b/esapi/api.simulate.ingest.go index 47c58fc696..52184191b8 100644 --- a/esapi/api.simulate.ingest.go +++ b/esapi/api.simulate.ingest.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.cleanup_repository.go b/esapi/api.snapshot.cleanup_repository.go index fe72c15be2..3f37089645 100644 --- a/esapi/api.snapshot.cleanup_repository.go +++ b/esapi/api.snapshot.cleanup_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.clone.go b/esapi/api.snapshot.clone.go index 8526d2e663..ff722d8796 100644 --- a/esapi/api.snapshot.clone.go +++ b/esapi/api.snapshot.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.create.go b/esapi/api.snapshot.create.go index 4f316f5030..a915e8a56e 100644 --- a/esapi/api.snapshot.create.go +++ b/esapi/api.snapshot.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.create_repository.go b/esapi/api.snapshot.create_repository.go index 7506d05f29..bbd051b938 100644 --- a/esapi/api.snapshot.create_repository.go +++ b/esapi/api.snapshot.create_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.delete.go b/esapi/api.snapshot.delete.go index 0beff6ce2e..81c2658d61 100644 --- a/esapi/api.snapshot.delete.go +++ b/esapi/api.snapshot.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.delete_repository.go b/esapi/api.snapshot.delete_repository.go index 46e262e8f6..ae24c9e6c5 100644 --- a/esapi/api.snapshot.delete_repository.go +++ b/esapi/api.snapshot.delete_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.get.go b/esapi/api.snapshot.get.go index a5a1d42656..dc3744970c 100644 --- a/esapi/api.snapshot.get.go +++ b/esapi/api.snapshot.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.get_repository.go b/esapi/api.snapshot.get_repository.go index f52cf023b9..0894f28984 100644 --- a/esapi/api.snapshot.get_repository.go +++ b/esapi/api.snapshot.get_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.repository_analyze.go b/esapi/api.snapshot.repository_analyze.go index cc958fe3a8..1c081ba675 100644 --- a/esapi/api.snapshot.repository_analyze.go +++ b/esapi/api.snapshot.repository_analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.repository_verify_integrity.go b/esapi/api.snapshot.repository_verify_integrity.go index 4e2fe352f3..7f02fb0297 100644 --- a/esapi/api.snapshot.repository_verify_integrity.go +++ b/esapi/api.snapshot.repository_verify_integrity.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.restore.go b/esapi/api.snapshot.restore.go index ca4234a819..b17900dd26 100644 --- a/esapi/api.snapshot.restore.go +++ b/esapi/api.snapshot.restore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.status.go b/esapi/api.snapshot.status.go index fcee081d9d..96054e4239 100644 --- a/esapi/api.snapshot.status.go +++ b/esapi/api.snapshot.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.verify_repository.go b/esapi/api.snapshot.verify_repository.go index c0919673b0..2bb42eddc4 100644 --- a/esapi/api.snapshot.verify_repository.go +++ b/esapi/api.snapshot.verify_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.delete_synonym.go b/esapi/api.synonyms.delete_synonym.go index 6f2eb72d4d..a09ce3cb33 100644 --- a/esapi/api.synonyms.delete_synonym.go +++ b/esapi/api.synonyms.delete_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.delete_synonym_rule.go b/esapi/api.synonyms.delete_synonym_rule.go index 390122712b..2cf4a5668a 100644 --- a/esapi/api.synonyms.delete_synonym_rule.go +++ b/esapi/api.synonyms.delete_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonym.go b/esapi/api.synonyms.get_synonym.go index 05f4fec4bc..6ddaa76849 100644 --- a/esapi/api.synonyms.get_synonym.go +++ b/esapi/api.synonyms.get_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonym_rule.go b/esapi/api.synonyms.get_synonym_rule.go index c9225d981e..bfe711133d 100644 --- a/esapi/api.synonyms.get_synonym_rule.go +++ b/esapi/api.synonyms.get_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonyms_sets.go b/esapi/api.synonyms.get_synonyms_sets.go index f0b21a02f1..588b5d1094 100644 --- a/esapi/api.synonyms.get_synonyms_sets.go +++ b/esapi/api.synonyms.get_synonyms_sets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.put_synonym.go b/esapi/api.synonyms.put_synonym.go index 4ec9236325..ebe89c2ab3 100644 --- a/esapi/api.synonyms.put_synonym.go +++ b/esapi/api.synonyms.put_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.put_synonym_rule.go b/esapi/api.synonyms.put_synonym_rule.go index 6645dc37a9..b086912824 100644 --- a/esapi/api.synonyms.put_synonym_rule.go +++ b/esapi/api.synonyms.put_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.cancel.go b/esapi/api.tasks.cancel.go index 0af16f51cc..0e6e4b1eab 100644 --- a/esapi/api.tasks.cancel.go +++ b/esapi/api.tasks.cancel.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.get.go b/esapi/api.tasks.get.go index a32f48ca4f..440d08245a 100644 --- a/esapi/api.tasks.get.go +++ b/esapi/api.tasks.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.list.go b/esapi/api.tasks.list.go index 944bdd752d..b0e3046b50 100644 --- a/esapi/api.tasks.list.go +++ b/esapi/api.tasks.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.terms_enum.go b/esapi/api.terms_enum.go index f244464c90..5a7696eed8 100644 --- a/esapi/api.terms_enum.go +++ b/esapi/api.terms_enum.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.termvectors.go b/esapi/api.termvectors.go index 1ff91335f9..92b7f6fa32 100644 --- a/esapi/api.termvectors.go +++ b/esapi/api.termvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.update.go b/esapi/api.update.go index 2ace2ab3de..239f73c844 100644 --- a/esapi/api.update.go +++ b/esapi/api.update.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -57,18 +57,19 @@ type UpdateRequest struct { Body io.Reader - IfPrimaryTerm *int - IfSeqNo *int - Lang string - Refresh string - RequireAlias *bool - RetryOnConflict *int - Routing string - Source []string - SourceExcludes []string - SourceIncludes []string - Timeout time.Duration - WaitForActiveShards string + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + Lang string + Refresh string + RequireAlias *bool + RetryOnConflict *int + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Timeout time.Duration + WaitForActiveShards string Pretty bool Human bool @@ -126,6 +127,10 @@ func (r UpdateRequest) Do(providedCtx context.Context, transport Transport) (*Re params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) } + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.Lang != "" { params["lang"] = r.Lang } @@ -265,6 +270,13 @@ func (f Update) WithIfSeqNo(v int) func(*UpdateRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Update) WithIncludeSourceOnError(v bool) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.IncludeSourceOnError = &v + } +} + // WithLang - the script language (default: painless). func (f Update) WithLang(v string) func(*UpdateRequest) { return func(r *UpdateRequest) { diff --git a/esapi/api.update_by_query.go b/esapi/api.update_by_query.go index 204e8f988e..4d40487444 100644 --- a/esapi/api.update_by_query.go +++ b/esapi/api.update_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.update_by_query_rethrottle.go b/esapi/api.update_by_query_rethrottle.go index bd2a4e808c..cb81209989 100644 --- a/esapi/api.update_by_query_rethrottle.go +++ b/esapi/api.update_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.delete.go b/esapi/api.xpack.async_search.delete.go index 13b0b4ad04..5d348ad4dc 100644 --- a/esapi/api.xpack.async_search.delete.go +++ b/esapi/api.xpack.async_search.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.get.go b/esapi/api.xpack.async_search.get.go index 20d89af457..24c918a6e7 100644 --- a/esapi/api.xpack.async_search.get.go +++ b/esapi/api.xpack.async_search.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.status.go b/esapi/api.xpack.async_search.status.go index 42c903652f..f0868293cd 100644 --- a/esapi/api.xpack.async_search.status.go +++ b/esapi/api.xpack.async_search.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.submit.go b/esapi/api.xpack.async_search.submit.go index c04014c0f8..3b1d7a9e4a 100644 --- a/esapi/api.xpack.async_search.submit.go +++ b/esapi/api.xpack.async_search.submit.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -62,6 +62,7 @@ type AsyncSearchSubmitRequest struct { Analyzer string AnalyzeWildcard *bool BatchedReduceSize *int + CcsMinimizeRoundtrips *bool DefaultOperator string Df string DocvalueFields []string @@ -77,6 +78,7 @@ type AsyncSearchSubmitRequest struct { Preference string Query string RequestCache *bool + RestTotalHitsAsInt *bool Routing []string SearchType string SeqNoPrimaryTerm *bool @@ -164,6 +166,10 @@ func (r AsyncSearchSubmitRequest) Do(providedCtx context.Context, transport Tran params["batched_reduce_size"] = strconv.FormatInt(int64(*r.BatchedReduceSize), 10) } + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + if r.DefaultOperator != "" { params["default_operator"] = r.DefaultOperator } @@ -224,6 +230,10 @@ func (r AsyncSearchSubmitRequest) Do(providedCtx context.Context, transport Tran params["request_cache"] = strconv.FormatBool(*r.RequestCache) } + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + if len(r.Routing) > 0 { params["routing"] = strings.Join(r.Routing, ",") } @@ -442,6 +452,13 @@ func (f AsyncSearchSubmit) WithBatchedReduceSize(v int) func(*AsyncSearchSubmitR } } +// WithCcsMinimizeRoundtrips - when doing a cross-cluster search, setting it to true may improve overall search latency, 
particularly when searching clusters with a large number of shards. however, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.. +func (f AsyncSearchSubmit) WithCcsMinimizeRoundtrips(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + // WithDefaultOperator - the default operator for query string query (and or or). func (f AsyncSearchSubmit) WithDefaultOperator(v string) func(*AsyncSearchSubmitRequest) { return func(r *AsyncSearchSubmitRequest) { @@ -547,6 +564,13 @@ func (f AsyncSearchSubmit) WithRequestCache(v bool) func(*AsyncSearchSubmitReque } } +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f AsyncSearchSubmit) WithRestTotalHitsAsInt(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.RestTotalHitsAsInt = &v + } +} + // WithRouting - a list of specific routing values. func (f AsyncSearchSubmit) WithRouting(v ...string) func(*AsyncSearchSubmitRequest) { return func(r *AsyncSearchSubmitRequest) { diff --git a/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go b/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go index a1045820ce..6f14212418 100644 --- a/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go b/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go index 0ed535ad36..400134cc2e 100644 --- a/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go +++ b/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.get_autoscaling_policy.go b/esapi/api.xpack.autoscaling.get_autoscaling_policy.go index 050f7ec064..6692739c15 100644 --- a/esapi/api.xpack.autoscaling.get_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.get_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.put_autoscaling_policy.go b/esapi/api.xpack.autoscaling.put_autoscaling_policy.go index e16d51dcab..fee793ee19 100644 --- a/esapi/api.xpack.autoscaling.put_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.put_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_data_frame_analytics.go b/esapi/api.xpack.cat.ml_data_frame_analytics.go index 4de4da7d2c..304703660a 100644 --- a/esapi/api.xpack.cat.ml_data_frame_analytics.go +++ b/esapi/api.xpack.cat.ml_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_datafeeds.go b/esapi/api.xpack.cat.ml_datafeeds.go index 42fa88dee0..5da0b87454 100644 --- a/esapi/api.xpack.cat.ml_datafeeds.go +++ b/esapi/api.xpack.cat.ml_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_jobs.go b/esapi/api.xpack.cat.ml_jobs.go index 4806a3b0f4..730e794ac2 100644 --- a/esapi/api.xpack.cat.ml_jobs.go +++ b/esapi/api.xpack.cat.ml_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_trained_models.go b/esapi/api.xpack.cat.ml_trained_models.go index b540c68315..10ca213fcb 100644 --- a/esapi/api.xpack.cat.ml_trained_models.go +++ b/esapi/api.xpack.cat.ml_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.transforms.go b/esapi/api.xpack.cat.transforms.go index cf0ce6d972..6976606bf2 100644 --- a/esapi/api.xpack.cat.transforms.go +++ b/esapi/api.xpack.cat.transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.delete_auto_follow_pattern.go b/esapi/api.xpack.ccr.delete_auto_follow_pattern.go index aeebabaea0..e09ea332a7 100644 --- a/esapi/api.xpack.ccr.delete_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.delete_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow.go b/esapi/api.xpack.ccr.follow.go index 5ea0cae439..189c8591dd 100644 --- a/esapi/api.xpack.ccr.follow.go +++ b/esapi/api.xpack.ccr.follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow_info.go b/esapi/api.xpack.ccr.follow_info.go index 6e281fea18..99b5cc9cbd 100644 --- a/esapi/api.xpack.ccr.follow_info.go +++ b/esapi/api.xpack.ccr.follow_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow_stats.go b/esapi/api.xpack.ccr.follow_stats.go index 17d20f6a9e..117f99fcb7 100644 --- a/esapi/api.xpack.ccr.follow_stats.go +++ b/esapi/api.xpack.ccr.follow_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.forget_follower.go b/esapi/api.xpack.ccr.forget_follower.go index 13ae28fb8e..0164a7a484 100644 --- a/esapi/api.xpack.ccr.forget_follower.go +++ b/esapi/api.xpack.ccr.forget_follower.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.get_auto_follow_pattern.go b/esapi/api.xpack.ccr.get_auto_follow_pattern.go index 9d0e575a5e..1fc2bc37c9 100644 --- a/esapi/api.xpack.ccr.get_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.get_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.pause_auto_follow_pattern.go b/esapi/api.xpack.ccr.pause_auto_follow_pattern.go index 2874991d9c..c74ffd00b6 100644 --- a/esapi/api.xpack.ccr.pause_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.pause_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.pause_follow.go b/esapi/api.xpack.ccr.pause_follow.go index bf2018ea40..4afe0b88b3 100644 --- a/esapi/api.xpack.ccr.pause_follow.go +++ b/esapi/api.xpack.ccr.pause_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.put_auto_follow_pattern.go b/esapi/api.xpack.ccr.put_auto_follow_pattern.go index aaaf1eb0ae..465afe009b 100644 --- a/esapi/api.xpack.ccr.put_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.put_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.resume_auto_follow_pattern.go b/esapi/api.xpack.ccr.resume_auto_follow_pattern.go index b00bd3da24..c04087c7ee 100644 --- a/esapi/api.xpack.ccr.resume_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.resume_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.resume_follow.go b/esapi/api.xpack.ccr.resume_follow.go index 5daa658bee..b501b17f9e 100644 --- a/esapi/api.xpack.ccr.resume_follow.go +++ b/esapi/api.xpack.ccr.resume_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.stats.go b/esapi/api.xpack.ccr.stats.go index f64c68f070..92b5f93901 100644 --- a/esapi/api.xpack.ccr.stats.go +++ b/esapi/api.xpack.ccr.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.unfollow.go b/esapi/api.xpack.ccr.unfollow.go index 10b44195bd..b7603b5382 100644 --- a/esapi/api.xpack.ccr.unfollow.go +++ b/esapi/api.xpack.ccr.unfollow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.close_point_in_time.go b/esapi/api.xpack.close_point_in_time.go index 7946091da1..ed9dfdde82 100644 --- a/esapi/api.xpack.close_point_in_time.go +++ b/esapi/api.xpack.close_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.delete_policy.go b/esapi/api.xpack.enrich.delete_policy.go index fd28aa72e0..4a6a90a4d6 100644 --- a/esapi/api.xpack.enrich.delete_policy.go +++ b/esapi/api.xpack.enrich.delete_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.execute_policy.go b/esapi/api.xpack.enrich.execute_policy.go index 946e53fb7c..64ff3dcc4f 100644 --- a/esapi/api.xpack.enrich.execute_policy.go +++ b/esapi/api.xpack.enrich.execute_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.get_policy.go b/esapi/api.xpack.enrich.get_policy.go index f19dba4982..13b01ab81d 100644 --- a/esapi/api.xpack.enrich.get_policy.go +++ b/esapi/api.xpack.enrich.get_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -190,7 +190,7 @@ func (f EnrichGetPolicy) WithName(v ...string) func(*EnrichGetPolicyRequest) { } } -// WithMasterTimeout - timeout for processing on master node. +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. func (f EnrichGetPolicy) WithMasterTimeout(v time.Duration) func(*EnrichGetPolicyRequest) { return func(r *EnrichGetPolicyRequest) { r.MasterTimeout = v diff --git a/esapi/api.xpack.enrich.put_policy.go b/esapi/api.xpack.enrich.put_policy.go index 4003185845..7abe406abc 100644 --- a/esapi/api.xpack.enrich.put_policy.go +++ b/esapi/api.xpack.enrich.put_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.stats.go b/esapi/api.xpack.enrich.stats.go index fa1476a17c..bcc249dd8a 100644 --- a/esapi/api.xpack.enrich.stats.go +++ b/esapi/api.xpack.enrich.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.delete.go b/esapi/api.xpack.eql.delete.go index 1d56a0fe98..c6f235f9a3 100644 --- a/esapi/api.xpack.eql.delete.go +++ b/esapi/api.xpack.eql.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.get.go b/esapi/api.xpack.eql.get.go index 983bbe6162..1550116b36 100644 --- a/esapi/api.xpack.eql.get.go +++ b/esapi/api.xpack.eql.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.get_status.go b/esapi/api.xpack.eql.get_status.go index 6263993aa7..32571a2449 100644 --- a/esapi/api.xpack.eql.get_status.go +++ b/esapi/api.xpack.eql.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.search.go b/esapi/api.xpack.eql.search.go index 4637a11cbd..b4c8500111 100644 --- a/esapi/api.xpack.eql.search.go +++ b/esapi/api.xpack.eql.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -56,9 +56,11 @@ type EqlSearchRequest struct { Body io.Reader - KeepAlive time.Duration - KeepOnCompletion *bool - WaitForCompletionTimeout time.Duration + AllowPartialSearchResults *bool + AllowPartialSequenceResults *bool + KeepAlive time.Duration + KeepOnCompletion *bool + WaitForCompletionTimeout time.Duration Pretty bool Human bool @@ -105,6 +107,14 @@ func (r EqlSearchRequest) Do(providedCtx context.Context, transport Transport) ( params = make(map[string]string) + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.AllowPartialSequenceResults != nil { + params["allow_partial_sequence_results"] = strconv.FormatBool(*r.AllowPartialSequenceResults) + } + if r.KeepAlive != 0 { params["keep_alive"] = formatDuration(r.KeepAlive) } @@ -202,6 +212,20 @@ func (f EqlSearch) WithContext(v context.Context) func(*EqlSearchRequest) { } } +// WithAllowPartialSearchResults - control whether the query should keep running in case of shard failures, and return partial results. +func (f EqlSearch) WithAllowPartialSearchResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithAllowPartialSequenceResults - control whether a sequence query should return partial results or no results at all in case of shard failures. 
this option has effect only if [allow_partial_search_results] is true.. +func (f EqlSearch) WithAllowPartialSequenceResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSequenceResults = &v + } +} + // WithKeepAlive - update the time interval in which the results (partial or final) for this search will be available. func (f EqlSearch) WithKeepAlive(v time.Duration) func(*EqlSearchRequest) { return func(r *EqlSearchRequest) { diff --git a/esapi/api.xpack.esql.async_query.go b/esapi/api.xpack.esql.async_query.go index 3a61e36a36..8cc38ea529 100644 --- a/esapi/api.xpack.esql.async_query.go +++ b/esapi/api.xpack.esql.async_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.esql.async_query_delete.go b/esapi/api.xpack.esql.async_query_delete.go new file mode 100644 index 0000000000..46aad7df4e --- /dev/null +++ b/esapi/api.xpack.esql.async_query_delete.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryDeleteFunc(t Transport) EsqlAsyncQueryDelete { + return func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) { + var r = EsqlAsyncQueryDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryDelete - Delete an async query request given its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html. +type EsqlAsyncQueryDelete func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) + +// EsqlAsyncQueryDeleteRequest configures the Esql Async Query Delete API request. +type EsqlAsyncQueryDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + 
if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryDelete) WithContext(v context.Context) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryDelete) WithPretty() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryDelete) WithHuman() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryDelete) WithErrorTrace() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryDelete) WithFilterPath(v ...string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryDelete) WithHeader(h map[string]string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f EsqlAsyncQueryDelete) WithOpaqueID(s string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.esql.async_query_get.go b/esapi/api.xpack.esql.async_query_get.go index b2f644c9bc..5b3db1388b 100644 --- a/esapi/api.xpack.esql.async_query_get.go +++ b/esapi/api.xpack.esql.async_query_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.esql.async_query_stop.go b/esapi/api.xpack.esql.async_query_stop.go new file mode 100644 index 0000000000..a4b44a24d8 --- /dev/null +++ b/esapi/api.xpack.esql.async_query_stop.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryStopFunc(t Transport) EsqlAsyncQueryStop { + return func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) { + var r = EsqlAsyncQueryStopRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryStop - Stops a previously submitted async query request given its ID and collects the results. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html. +type EsqlAsyncQueryStop func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) + +// EsqlAsyncQueryStopRequest configures the Esql Async Query Stop API request. +type EsqlAsyncQueryStopRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID) + 1 + len("stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + path.WriteString("/") + path.WriteString("stop") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, 
"elasticsearch", "esql.async_query_stop") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryStop) WithContext(v context.Context) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryStop) WithPretty() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryStop) WithHuman() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryStop) WithErrorTrace() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryStop) WithFilterPath(v ...string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryStop) WithHeader(h map[string]string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f EsqlAsyncQueryStop) WithOpaqueID(s string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.esql.query.go b/esapi/api.xpack.esql.query.go index cc4b59bfe4..194873be57 100644 --- a/esapi/api.xpack.esql.query.go +++ b/esapi/api.xpack.esql.query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.graph.explore.go b/esapi/api.xpack.graph.explore.go index 6300452260..4c60470c62 100644 --- a/esapi/api.xpack.graph.explore.go +++ b/esapi/api.xpack.graph.explore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.delete_lifecycle.go b/esapi/api.xpack.ilm.delete_lifecycle.go index 5c99d4fe6b..6e5bb3b6da 100644 --- a/esapi/api.xpack.ilm.delete_lifecycle.go +++ b/esapi/api.xpack.ilm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newILMDeleteLifecycleFunc(t Transport) ILMDeleteLifecycle { @@ -51,6 +52,9 @@ type ILMDeleteLifecycle func(policy string, o ...func(*ILMDeleteLifecycleRequest type ILMDeleteLifecycleRequest struct { Policy string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -96,6 +100,14 @@ func (r ILMDeleteLifecycleRequest) Do(providedCtx context.Context, transport Tra params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +186,20 @@ func (f ILMDeleteLifecycle) WithContext(v context.Context) func(*ILMDeleteLifecy } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMDeleteLifecycle) WithMasterTimeout(v time.Duration) func(*ILMDeleteLifecycleRequest) { + return func(r *ILMDeleteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMDeleteLifecycle) WithTimeout(v time.Duration) func(*ILMDeleteLifecycleRequest) { + return func(r *ILMDeleteLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ILMDeleteLifecycle) WithPretty() func(*ILMDeleteLifecycleRequest) { return func(r *ILMDeleteLifecycleRequest) { diff --git a/esapi/api.xpack.ilm.explain_lifecycle.go b/esapi/api.xpack.ilm.explain_lifecycle.go index 372f9d9dd8..9dbb9fcee9 100644 --- a/esapi/api.xpack.ilm.explain_lifecycle.go +++ b/esapi/api.xpack.ilm.explain_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newILMExplainLifecycleFunc(t Transport) ILMExplainLifecycle { @@ -52,8 +53,9 @@ type ILMExplainLifecycle func(index string, o ...func(*ILMExplainLifecycleReques type ILMExplainLifecycleRequest struct { Index string - OnlyErrors *bool - OnlyManaged *bool + MasterTimeout time.Duration + OnlyErrors *bool + OnlyManaged *bool Pretty bool Human bool @@ -100,6 +102,10 @@ func (r ILMExplainLifecycleRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + if r.OnlyErrors != nil { params["only_errors"] = strconv.FormatBool(*r.OnlyErrors) } @@ -186,6 +192,13 @@ func (f ILMExplainLifecycle) WithContext(v context.Context) func(*ILMExplainLife } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMExplainLifecycle) WithMasterTimeout(v time.Duration) func(*ILMExplainLifecycleRequest) { + return func(r *ILMExplainLifecycleRequest) { + r.MasterTimeout = v + } +} + // WithOnlyErrors - filters the indices included in the response to ones in an ilm error state, implies only_managed. func (f ILMExplainLifecycle) WithOnlyErrors(v bool) func(*ILMExplainLifecycleRequest) { return func(r *ILMExplainLifecycleRequest) { diff --git a/esapi/api.xpack.ilm.get_lifecycle.go b/esapi/api.xpack.ilm.get_lifecycle.go index df4bafa5c1..d681557925 100644 --- a/esapi/api.xpack.ilm.get_lifecycle.go +++ b/esapi/api.xpack.ilm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newILMGetLifecycleFunc(t Transport) ILMGetLifecycle { @@ -51,6 +52,9 @@ type ILMGetLifecycle func(o ...func(*ILMGetLifecycleRequest)) (*Response, error) type ILMGetLifecycleRequest struct { Policy string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -98,6 +102,14 @@ func (r ILMGetLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -183,6 +195,20 @@ func (f ILMGetLifecycle) WithPolicy(v string) func(*ILMGetLifecycleRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMGetLifecycle) WithMasterTimeout(v time.Duration) func(*ILMGetLifecycleRequest) { + return func(r *ILMGetLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMGetLifecycle) WithTimeout(v time.Duration) func(*ILMGetLifecycleRequest) { + return func(r *ILMGetLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ILMGetLifecycle) WithPretty() func(*ILMGetLifecycleRequest) { return func(r *ILMGetLifecycleRequest) { diff --git a/esapi/api.xpack.ilm.get_status.go b/esapi/api.xpack.ilm.get_status.go index 507b66f38a..ea22f5c7ae 100644 --- a/esapi/api.xpack.ilm.get_status.go +++ b/esapi/api.xpack.ilm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.migrate_to_data_tiers.go b/esapi/api.xpack.ilm.migrate_to_data_tiers.go index 7d4fae2f46..fbce376dfa 100644 --- a/esapi/api.xpack.ilm.migrate_to_data_tiers.go +++ b/esapi/api.xpack.ilm.migrate_to_data_tiers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -25,6 +25,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newILMMigrateToDataTiersFunc(t Transport) ILMMigrateToDataTiers { @@ -53,7 +54,8 @@ type ILMMigrateToDataTiers func(o ...func(*ILMMigrateToDataTiersRequest)) (*Resp type ILMMigrateToDataTiersRequest struct { Body io.Reader - DryRun *bool + DryRun *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -96,6 +98,10 @@ func (r ILMMigrateToDataTiersRequest) Do(providedCtx context.Context, transport params["dry_run"] = strconv.FormatBool(*r.DryRun) } + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -195,6 +201,13 @@ func (f ILMMigrateToDataTiers) WithDryRun(v bool) func(*ILMMigrateToDataTiersReq } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMMigrateToDataTiers) WithMasterTimeout(v time.Duration) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.MasterTimeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f ILMMigrateToDataTiers) WithPretty() func(*ILMMigrateToDataTiersRequest) { return func(r *ILMMigrateToDataTiersRequest) { diff --git a/esapi/api.xpack.ilm.move_to_step.go b/esapi/api.xpack.ilm.move_to_step.go index b00a1c3a30..682cad17b4 100644 --- a/esapi/api.xpack.ilm.move_to_step.go +++ b/esapi/api.xpack.ilm.move_to_step.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.put_lifecycle.go b/esapi/api.xpack.ilm.put_lifecycle.go index 299f80a481..1e0db048a8 100644 --- a/esapi/api.xpack.ilm.put_lifecycle.go +++ b/esapi/api.xpack.ilm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newILMPutLifecycleFunc(t Transport) ILMPutLifecycle { @@ -54,6 +55,9 @@ type ILMPutLifecycleRequest struct { Policy string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -99,6 +103,14 @@ func (r ILMPutLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -191,6 +203,20 @@ func (f ILMPutLifecycle) WithBody(v io.Reader) func(*ILMPutLifecycleRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f ILMPutLifecycle) WithMasterTimeout(v time.Duration) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMPutLifecycle) WithTimeout(v time.Duration) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ILMPutLifecycle) WithPretty() func(*ILMPutLifecycleRequest) { return func(r *ILMPutLifecycleRequest) { diff --git a/esapi/api.xpack.ilm.remove_policy.go b/esapi/api.xpack.ilm.remove_policy.go index a5ec25158a..88a89ee686 100644 --- a/esapi/api.xpack.ilm.remove_policy.go +++ b/esapi/api.xpack.ilm.remove_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.retry.go b/esapi/api.xpack.ilm.retry.go index 94f0cee4f6..e93cda0a7d 100644 --- a/esapi/api.xpack.ilm.retry.go +++ b/esapi/api.xpack.ilm.retry.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.start.go b/esapi/api.xpack.ilm.start.go index 429336f3f8..b711888edc 100644 --- a/esapi/api.xpack.ilm.start.go +++ b/esapi/api.xpack.ilm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newILMStartFunc(t Transport) ILMStart { @@ -49,6 +50,9 @@ type ILMStart func(o ...func(*ILMStartRequest)) (*Response, error) // ILMStartRequest configures the ILM Start API request. type ILMStartRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r ILMStartRequest) Do(providedCtx context.Context, transport Transport) (* params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f ILMStart) WithContext(v context.Context) func(*ILMStartRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMStart) WithMasterTimeout(v time.Duration) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMStart) WithTimeout(v time.Duration) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ILMStart) WithPretty() func(*ILMStartRequest) { return func(r *ILMStartRequest) { diff --git a/esapi/api.xpack.ilm.stop.go b/esapi/api.xpack.ilm.stop.go index 1d1a772187..e5655f44e0 100644 --- a/esapi/api.xpack.ilm.stop.go +++ b/esapi/api.xpack.ilm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newILMStopFunc(t Transport) ILMStop { @@ -49,6 +50,9 @@ type ILMStop func(o ...func(*ILMStopRequest)) (*Response, error) // ILMStopRequest configures the ILM Stop API request. type ILMStopRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r ILMStopRequest) Do(providedCtx context.Context, transport Transport) (*R params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f ILMStop) WithContext(v context.Context) func(*ILMStopRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMStop) WithMasterTimeout(v time.Duration) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMStop) WithTimeout(v time.Duration) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ILMStop) WithPretty() func(*ILMStopRequest) { return func(r *ILMStopRequest) { diff --git a/esapi/api.xpack.indices.create_data_stream.go b/esapi/api.xpack.indices.create_data_stream.go index be1d521e64..7937baaf7b 100644 --- a/esapi/api.xpack.indices.create_data_stream.go +++ b/esapi/api.xpack.indices.create_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.data_streams_stats.go b/esapi/api.xpack.indices.data_streams_stats.go index 0576f1e67f..962ff2443d 100644 --- a/esapi/api.xpack.indices.data_streams_stats.go +++ b/esapi/api.xpack.indices.data_streams_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.delete_data_stream.go b/esapi/api.xpack.indices.delete_data_stream.go index 4efa32233b..1c693a2b83 100644 --- a/esapi/api.xpack.indices.delete_data_stream.go +++ b/esapi/api.xpack.indices.delete_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.get_data_stream.go b/esapi/api.xpack.indices.get_data_stream.go index 5ac9ed2da4..d10920e73c 100644 --- a/esapi/api.xpack.indices.get_data_stream.go +++ b/esapi/api.xpack.indices.get_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.migrate_to_data_stream.go b/esapi/api.xpack.indices.migrate_to_data_stream.go index bcfc3d818c..0e4b4561a6 100644 --- a/esapi/api.xpack.indices.migrate_to_data_stream.go +++ b/esapi/api.xpack.indices.migrate_to_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.promote_data_stream.go b/esapi/api.xpack.indices.promote_data_stream.go index 253c709d10..305c59207d 100644 --- a/esapi/api.xpack.indices.promote_data_stream.go +++ b/esapi/api.xpack.indices.promote_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.reload_search_analyzers.go b/esapi/api.xpack.indices.reload_search_analyzers.go index 1833d962a6..e083eed309 100644 --- a/esapi/api.xpack.indices.reload_search_analyzers.go +++ b/esapi/api.xpack.indices.reload_search_analyzers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.delete.go b/esapi/api.xpack.license.delete.go index 993bcc9cde..bd6f85593d 100644 --- a/esapi/api.xpack.license.delete.go +++ b/esapi/api.xpack.license.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get.go b/esapi/api.xpack.license.get.go index 073573e57a..b806f3a23a 100644 --- a/esapi/api.xpack.license.get.go +++ b/esapi/api.xpack.license.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get_basic_status.go b/esapi/api.xpack.license.get_basic_status.go index f5768acd6c..b4b326aaad 100644 --- a/esapi/api.xpack.license.get_basic_status.go +++ b/esapi/api.xpack.license.get_basic_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get_trial_status.go b/esapi/api.xpack.license.get_trial_status.go index 50aecfcbe1..749a363482 100644 --- a/esapi/api.xpack.license.get_trial_status.go +++ b/esapi/api.xpack.license.get_trial_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post.go b/esapi/api.xpack.license.post.go index da5ca77004..8112d14b1b 100644 --- a/esapi/api.xpack.license.post.go +++ b/esapi/api.xpack.license.post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post_start_basic.go b/esapi/api.xpack.license.post_start_basic.go index 691ce447c7..f892acb190 100644 --- a/esapi/api.xpack.license.post_start_basic.go +++ b/esapi/api.xpack.license.post_start_basic.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post_start_trial.go b/esapi/api.xpack.license.post_start_trial.go index c71b21ddfb..2cb52cdc8b 100644 --- a/esapi/api.xpack.license.post_start_trial.go +++ b/esapi/api.xpack.license.post_start_trial.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -53,7 +53,6 @@ type LicensePostStartTrial func(o ...func(*LicensePostStartTrialRequest)) (*Resp type LicensePostStartTrialRequest struct { Acknowledge *bool MasterTimeout time.Duration - Timeout time.Duration DocumentType string Pretty bool @@ -101,10 +100,6 @@ func (r LicensePostStartTrialRequest) Do(providedCtx context.Context, transport params["master_timeout"] = formatDuration(r.MasterTimeout) } - if r.Timeout != 0 { - params["timeout"] = formatDuration(r.Timeout) - } - if r.DocumentType != "" { params["type"] = r.DocumentType } @@ -201,13 +196,6 @@ func (f LicensePostStartTrial) WithMasterTimeout(v time.Duration) func(*LicenseP } } -// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. -func (f LicensePostStartTrial) WithTimeout(v time.Duration) func(*LicensePostStartTrialRequest) { - return func(r *LicensePostStartTrialRequest) { - r.Timeout = v - } -} - // WithDocumentType - the type of trial license to generate (default: "trial"). 
func (f LicensePostStartTrial) WithDocumentType(v string) func(*LicensePostStartTrialRequest) { return func(r *LicensePostStartTrialRequest) { diff --git a/esapi/api.xpack.logstash.delete_pipeline.go b/esapi/api.xpack.logstash.delete_pipeline.go index 343a02f717..eb957de848 100644 --- a/esapi/api.xpack.logstash.delete_pipeline.go +++ b/esapi/api.xpack.logstash.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.logstash.get_pipeline.go b/esapi/api.xpack.logstash.get_pipeline.go index 7bdc831ca6..f107ebccc2 100644 --- a/esapi/api.xpack.logstash.get_pipeline.go +++ b/esapi/api.xpack.logstash.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.logstash.put_pipeline.go b/esapi/api.xpack.logstash.put_pipeline.go index 0daabf6a49..a253b2ef1a 100644 --- a/esapi/api.xpack.logstash.put_pipeline.go +++ b/esapi/api.xpack.logstash.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.deprecations.go b/esapi/api.xpack.migration.deprecations.go index cf67b78591..6bf7cb7c95 100644 --- a/esapi/api.xpack.migration.deprecations.go +++ b/esapi/api.xpack.migration.deprecations.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.get_feature_upgrade_status.go b/esapi/api.xpack.migration.get_feature_upgrade_status.go index 77ed8afad4..5c4b64d609 100644 --- a/esapi/api.xpack.migration.get_feature_upgrade_status.go +++ b/esapi/api.xpack.migration.get_feature_upgrade_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.post_feature_upgrade.go b/esapi/api.xpack.migration.post_feature_upgrade.go index cda22ef994..24abbe4afa 100644 --- a/esapi/api.xpack.migration.post_feature_upgrade.go +++ b/esapi/api.xpack.migration.post_feature_upgrade.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go b/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go index 63d100b96d..ce297891c3 100644 --- a/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go +++ b/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.close_job.go b/esapi/api.xpack.ml.close_job.go index 3e9cf686d4..c598598cec 100644 --- a/esapi/api.xpack.ml.close_job.go +++ b/esapi/api.xpack.ml.close_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar.go b/esapi/api.xpack.ml.delete_calendar.go index 194693a113..03a8a468a8 100644 --- a/esapi/api.xpack.ml.delete_calendar.go +++ b/esapi/api.xpack.ml.delete_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar_event.go b/esapi/api.xpack.ml.delete_calendar_event.go index d0dec58aa7..a1ddc43f3e 100644 --- a/esapi/api.xpack.ml.delete_calendar_event.go +++ b/esapi/api.xpack.ml.delete_calendar_event.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar_job.go b/esapi/api.xpack.ml.delete_calendar_job.go index 5f3cecaaf1..101fc614c4 100644 --- a/esapi/api.xpack.ml.delete_calendar_job.go +++ b/esapi/api.xpack.ml.delete_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_data_frame_analytics.go b/esapi/api.xpack.ml.delete_data_frame_analytics.go index 91b14b8bd4..ec6613daaf 100644 --- a/esapi/api.xpack.ml.delete_data_frame_analytics.go +++ b/esapi/api.xpack.ml.delete_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_datafeed.go b/esapi/api.xpack.ml.delete_datafeed.go index 5b6a52a489..d405272537 100644 --- a/esapi/api.xpack.ml.delete_datafeed.go +++ b/esapi/api.xpack.ml.delete_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_expired_data.go b/esapi/api.xpack.ml.delete_expired_data.go index f63ccc8041..e0101ae7f4 100644 --- a/esapi/api.xpack.ml.delete_expired_data.go +++ b/esapi/api.xpack.ml.delete_expired_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_filter.go b/esapi/api.xpack.ml.delete_filter.go index 4f9f55db54..4d5b81e33a 100644 --- a/esapi/api.xpack.ml.delete_filter.go +++ b/esapi/api.xpack.ml.delete_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_forecast.go b/esapi/api.xpack.ml.delete_forecast.go index f7e48836f6..6442f751f9 100644 --- a/esapi/api.xpack.ml.delete_forecast.go +++ b/esapi/api.xpack.ml.delete_forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_job.go b/esapi/api.xpack.ml.delete_job.go index 0f3653430c..fbc5b20af3 100644 --- a/esapi/api.xpack.ml.delete_job.go +++ b/esapi/api.xpack.ml.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_model_snapshot.go b/esapi/api.xpack.ml.delete_model_snapshot.go index 4fd11e5c3f..b8fa6612ea 100644 --- a/esapi/api.xpack.ml.delete_model_snapshot.go +++ b/esapi/api.xpack.ml.delete_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_trained_model.go b/esapi/api.xpack.ml.delete_trained_model.go index 660ddf1b44..e64c4285cf 100644 --- a/esapi/api.xpack.ml.delete_trained_model.go +++ b/esapi/api.xpack.ml.delete_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_trained_model_alias.go b/esapi/api.xpack.ml.delete_trained_model_alias.go index 855f44bb9e..60f77d9de3 100644 --- a/esapi/api.xpack.ml.delete_trained_model_alias.go +++ b/esapi/api.xpack.ml.delete_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.estimate_model_memory.go b/esapi/api.xpack.ml.estimate_model_memory.go index 90abe9b67e..0ceb55f2ef 100644 --- a/esapi/api.xpack.ml.estimate_model_memory.go +++ b/esapi/api.xpack.ml.estimate_model_memory.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.evaluate_data_frame.go b/esapi/api.xpack.ml.evaluate_data_frame.go index 4f68841e75..27941d3d97 100644 --- a/esapi/api.xpack.ml.evaluate_data_frame.go +++ b/esapi/api.xpack.ml.evaluate_data_frame.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.explain_data_frame_analytics.go b/esapi/api.xpack.ml.explain_data_frame_analytics.go index 0e8d5d7306..7d33716b46 100644 --- a/esapi/api.xpack.ml.explain_data_frame_analytics.go +++ b/esapi/api.xpack.ml.explain_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.flush_job.go b/esapi/api.xpack.ml.flush_job.go index 5f2394c643..2a178c293c 100644 --- a/esapi/api.xpack.ml.flush_job.go +++ b/esapi/api.xpack.ml.flush_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.forecast.go b/esapi/api.xpack.ml.forecast.go index c290711c38..7f2a59c429 100644 --- a/esapi/api.xpack.ml.forecast.go +++ b/esapi/api.xpack.ml.forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_buckets.go b/esapi/api.xpack.ml.get_buckets.go index aae1350c18..cafa6d089b 100644 --- a/esapi/api.xpack.ml.get_buckets.go +++ b/esapi/api.xpack.ml.get_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_calendar_events.go b/esapi/api.xpack.ml.get_calendar_events.go index 319b35e6dd..e0b93f0530 100644 --- a/esapi/api.xpack.ml.get_calendar_events.go +++ b/esapi/api.xpack.ml.get_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_calendars.go b/esapi/api.xpack.ml.get_calendars.go index 4ffe1501b2..9ee7b1efc0 100644 --- a/esapi/api.xpack.ml.get_calendars.go +++ b/esapi/api.xpack.ml.get_calendars.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_categories.go b/esapi/api.xpack.ml.get_categories.go index c30c544438..8eec561a9a 100644 --- a/esapi/api.xpack.ml.get_categories.go +++ b/esapi/api.xpack.ml.get_categories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_data_frame_analytics.go b/esapi/api.xpack.ml.get_data_frame_analytics.go index c25a904213..0e3b45dbf7 100644 --- a/esapi/api.xpack.ml.get_data_frame_analytics.go +++ b/esapi/api.xpack.ml.get_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_data_frame_analytics_stats.go b/esapi/api.xpack.ml.get_data_frame_analytics_stats.go index fdcb8d41c5..0422e2cd64 100644 --- a/esapi/api.xpack.ml.get_data_frame_analytics_stats.go +++ b/esapi/api.xpack.ml.get_data_frame_analytics_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_datafeed_stats.go b/esapi/api.xpack.ml.get_datafeed_stats.go index 3fc774f3e1..fdbdf6dfbf 100644 --- a/esapi/api.xpack.ml.get_datafeed_stats.go +++ b/esapi/api.xpack.ml.get_datafeed_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_datafeeds.go b/esapi/api.xpack.ml.get_datafeeds.go index f9fc08027e..99bc1bed5c 100644 --- a/esapi/api.xpack.ml.get_datafeeds.go +++ b/esapi/api.xpack.ml.get_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_filters.go b/esapi/api.xpack.ml.get_filters.go index daf0410e88..a413aa9b87 100644 --- a/esapi/api.xpack.ml.get_filters.go +++ b/esapi/api.xpack.ml.get_filters.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_influencers.go b/esapi/api.xpack.ml.get_influencers.go index 01024e2d89..82eed0c19c 100644 --- a/esapi/api.xpack.ml.get_influencers.go +++ b/esapi/api.xpack.ml.get_influencers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_job_stats.go b/esapi/api.xpack.ml.get_job_stats.go index 0e24ef70b3..1bd2e77992 100644 --- a/esapi/api.xpack.ml.get_job_stats.go +++ b/esapi/api.xpack.ml.get_job_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_jobs.go b/esapi/api.xpack.ml.get_jobs.go index 76df09282e..46991b594e 100644 --- a/esapi/api.xpack.ml.get_jobs.go +++ b/esapi/api.xpack.ml.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_memory_stats.go b/esapi/api.xpack.ml.get_memory_stats.go index 5b9c3b5ef8..45e00df5e3 100644 --- a/esapi/api.xpack.ml.get_memory_stats.go +++ b/esapi/api.xpack.ml.get_memory_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go b/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go index bb30dd2f3e..d81838954f 100644 --- a/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go +++ b/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_model_snapshots.go b/esapi/api.xpack.ml.get_model_snapshots.go index f3b607fec2..0ff5aa6f30 100644 --- a/esapi/api.xpack.ml.get_model_snapshots.go +++ b/esapi/api.xpack.ml.get_model_snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_overall_buckets.go b/esapi/api.xpack.ml.get_overall_buckets.go index 5e98222931..455bdb493e 100644 --- a/esapi/api.xpack.ml.get_overall_buckets.go +++ b/esapi/api.xpack.ml.get_overall_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_records.go b/esapi/api.xpack.ml.get_records.go index 5d7e04fe81..bd7ab339b1 100644 --- a/esapi/api.xpack.ml.get_records.go +++ b/esapi/api.xpack.ml.get_records.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_trained_models.go b/esapi/api.xpack.ml.get_trained_models.go index 9889f6f1f6..15ba1e1f92 100644 --- a/esapi/api.xpack.ml.get_trained_models.go +++ b/esapi/api.xpack.ml.get_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -52,14 +52,13 @@ type MLGetTrainedModels func(o ...func(*MLGetTrainedModelsRequest)) (*Response, type MLGetTrainedModelsRequest struct { ModelID string - AllowNoMatch *bool - DecompressDefinition *bool - ExcludeGenerated *bool - From *int - Include string - IncludeModelDefinition *bool - Size *int - Tags []string + AllowNoMatch *bool + DecompressDefinition *bool + ExcludeGenerated *bool + From *int + Include string + Size *int + Tags []string Pretty bool Human bool @@ -128,10 +127,6 @@ func (r MLGetTrainedModelsRequest) Do(providedCtx context.Context, transport Tra params["include"] = r.Include } - if r.IncludeModelDefinition != nil { - params["include_model_definition"] = strconv.FormatBool(*r.IncludeModelDefinition) - } - if r.Size != nil { params["size"] = strconv.FormatInt(int64(*r.Size), 10) } @@ -260,13 +255,6 @@ func (f MLGetTrainedModels) WithInclude(v string) func(*MLGetTrainedModelsReques } } -// WithIncludeModelDefinition - should the full model definition be included in the results. these definitions can be large. so be cautious when including them. defaults to false.. -func (f MLGetTrainedModels) WithIncludeModelDefinition(v bool) func(*MLGetTrainedModelsRequest) { - return func(r *MLGetTrainedModelsRequest) { - r.IncludeModelDefinition = &v - } -} - // WithSize - specifies a max number of trained models to get. func (f MLGetTrainedModels) WithSize(v int) func(*MLGetTrainedModelsRequest) { return func(r *MLGetTrainedModelsRequest) { diff --git a/esapi/api.xpack.ml.get_trained_models_stats.go b/esapi/api.xpack.ml.get_trained_models_stats.go index 0d51c06d5c..e498a2c51c 100644 --- a/esapi/api.xpack.ml.get_trained_models_stats.go +++ b/esapi/api.xpack.ml.get_trained_models_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.infer_trained_model.go b/esapi/api.xpack.ml.infer_trained_model.go index e7e4619b9e..966bb01faf 100644 --- a/esapi/api.xpack.ml.infer_trained_model.go +++ b/esapi/api.xpack.ml.infer_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -88,7 +88,7 @@ func (r MLInferTrainedModelRequest) Do(providedCtx context.Context, transport Tr method = "POST" - path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("deployment") + 1 + len("_infer")) + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("_infer")) path.WriteString("http://") path.WriteString("/") path.WriteString("_ml") @@ -100,8 +100,6 @@ func (r MLInferTrainedModelRequest) Do(providedCtx context.Context, transport Tr instrument.RecordPathPart(ctx, "model_id", r.ModelID) } path.WriteString("/") - path.WriteString("deployment") - path.WriteString("/") path.WriteString("_infer") params = make(map[string]string) diff --git a/esapi/api.xpack.ml.info.go b/esapi/api.xpack.ml.info.go index a3c238084e..ae4bbddc1a 100644 --- a/esapi/api.xpack.ml.info.go +++ b/esapi/api.xpack.ml.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.open_job.go b/esapi/api.xpack.ml.open_job.go index 1f03cce36c..e0a936c7d0 100644 --- a/esapi/api.xpack.ml.open_job.go +++ b/esapi/api.xpack.ml.open_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.post_calendar_events.go b/esapi/api.xpack.ml.post_calendar_events.go index d0cc1a479f..adef4f8e89 100644 --- a/esapi/api.xpack.ml.post_calendar_events.go +++ b/esapi/api.xpack.ml.post_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.post_data.go b/esapi/api.xpack.ml.post_data.go index cf3767351d..fb248e5805 100644 --- a/esapi/api.xpack.ml.post_data.go +++ b/esapi/api.xpack.ml.post_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.preview_data_frame_analytics.go b/esapi/api.xpack.ml.preview_data_frame_analytics.go index 7de623cf56..e86ee793b8 100644 --- a/esapi/api.xpack.ml.preview_data_frame_analytics.go +++ b/esapi/api.xpack.ml.preview_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.preview_datafeed.go b/esapi/api.xpack.ml.preview_datafeed.go index ec9dac2462..23dab75552 100644 --- a/esapi/api.xpack.ml.preview_datafeed.go +++ b/esapi/api.xpack.ml.preview_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_calendar.go b/esapi/api.xpack.ml.put_calendar.go index c7efd600b2..d0049bf9d8 100644 --- a/esapi/api.xpack.ml.put_calendar.go +++ b/esapi/api.xpack.ml.put_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_calendar_job.go b/esapi/api.xpack.ml.put_calendar_job.go index d955776cbd..2d7cace718 100644 --- a/esapi/api.xpack.ml.put_calendar_job.go +++ b/esapi/api.xpack.ml.put_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_data_frame_analytics.go b/esapi/api.xpack.ml.put_data_frame_analytics.go index dd6086abf2..d26e21519b 100644 --- a/esapi/api.xpack.ml.put_data_frame_analytics.go +++ b/esapi/api.xpack.ml.put_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_datafeed.go b/esapi/api.xpack.ml.put_datafeed.go index 46f550d63e..50a7b3c4f7 100644 --- a/esapi/api.xpack.ml.put_datafeed.go +++ b/esapi/api.xpack.ml.put_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_filter.go b/esapi/api.xpack.ml.put_filter.go index 83479aa417..61c8f2d4c0 100644 --- a/esapi/api.xpack.ml.put_filter.go +++ b/esapi/api.xpack.ml.put_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_job.go b/esapi/api.xpack.ml.put_job.go index 47e6316c8e..c4f9ce9067 100644 --- a/esapi/api.xpack.ml.put_job.go +++ b/esapi/api.xpack.ml.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model.go b/esapi/api.xpack.ml.put_trained_model.go index bcfcd79136..0f4f7c3762 100644 --- a/esapi/api.xpack.ml.put_trained_model.go +++ b/esapi/api.xpack.ml.put_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_alias.go b/esapi/api.xpack.ml.put_trained_model_alias.go index b008226531..1951a89155 100644 --- a/esapi/api.xpack.ml.put_trained_model_alias.go +++ b/esapi/api.xpack.ml.put_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_definition_part.go b/esapi/api.xpack.ml.put_trained_model_definition_part.go index 8e806cf9d0..0fae9e88ff 100644 --- a/esapi/api.xpack.ml.put_trained_model_definition_part.go +++ b/esapi/api.xpack.ml.put_trained_model_definition_part.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_vocabulary.go b/esapi/api.xpack.ml.put_trained_model_vocabulary.go index 48f08dc0bf..a6fdd834fc 100644 --- a/esapi/api.xpack.ml.put_trained_model_vocabulary.go +++ b/esapi/api.xpack.ml.put_trained_model_vocabulary.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.reset_job.go b/esapi/api.xpack.ml.reset_job.go index 1999f9bf6c..ef9e246fc0 100644 --- a/esapi/api.xpack.ml.reset_job.go +++ b/esapi/api.xpack.ml.reset_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.revert_model_snapshot.go b/esapi/api.xpack.ml.revert_model_snapshot.go index bc6b3fa63d..14365be333 100644 --- a/esapi/api.xpack.ml.revert_model_snapshot.go +++ b/esapi/api.xpack.ml.revert_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.set_upgrade_mode.go b/esapi/api.xpack.ml.set_upgrade_mode.go index 0f45117209..08d6cf25b1 100644 --- a/esapi/api.xpack.ml.set_upgrade_mode.go +++ b/esapi/api.xpack.ml.set_upgrade_mode.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_data_frame_analytics.go b/esapi/api.xpack.ml.start_data_frame_analytics.go index dbc57ef4d2..ac329b1d8f 100644 --- a/esapi/api.xpack.ml.start_data_frame_analytics.go +++ b/esapi/api.xpack.ml.start_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_datafeed.go b/esapi/api.xpack.ml.start_datafeed.go index 8f02eadd4c..2591309737 100644 --- a/esapi/api.xpack.ml.start_datafeed.go +++ b/esapi/api.xpack.ml.start_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_trained_model_deployment.go b/esapi/api.xpack.ml.start_trained_model_deployment.go index fc008407e3..c78ef2cdd7 100644 --- a/esapi/api.xpack.ml.start_trained_model_deployment.go +++ b/esapi/api.xpack.ml.start_trained_model_deployment.go @@ -15,12 +15,13 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi import ( "context" + "io" "net/http" "strconv" "strings" @@ -51,6 +52,8 @@ type MLStartTrainedModelDeployment func(model_id string, o ...func(*MLStartTrain // MLStartTrainedModelDeploymentRequest configures the ML Start Trained Model Deployment API request. type MLStartTrainedModelDeploymentRequest struct { + Body io.Reader + ModelID string CacheSize string @@ -159,7 +162,7 @@ func (r MLStartTrainedModelDeploymentRequest) Do(providedCtx context.Context, tr params["filter_path"] = strings.Join(r.FilterPath, ",") } - req, err := newRequest(method, path.String(), nil) + req, err := newRequest(method, path.String(), r.Body) if err != nil { if instrument, ok := r.instrument.(Instrumentation); ok { instrument.RecordError(ctx, err) @@ -187,12 +190,19 @@ func (r MLStartTrainedModelDeploymentRequest) Do(providedCtx context.Context, tr } } + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + if ctx != nil { req = req.WithContext(ctx) } if instrument, ok := r.instrument.(Instrumentation); ok { instrument.BeforeRequest(req, "ml.start_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.start_trained_model_deployment", r.Body); reader != nil { + req.Body = reader + } } res, err := transport.Perform(req) if instrument, ok 
:= r.instrument.(Instrumentation); ok { @@ -221,6 +231,13 @@ func (f MLStartTrainedModelDeployment) WithContext(v context.Context) func(*MLSt } } +// WithBody - The settings for the trained model deployment. +func (f MLStartTrainedModelDeployment) WithBody(v io.Reader) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Body = v + } +} + // WithCacheSize - a byte-size value for configuring the inference cache size. for example, 20mb.. func (f MLStartTrainedModelDeployment) WithCacheSize(v string) func(*MLStartTrainedModelDeploymentRequest) { return func(r *MLStartTrainedModelDeploymentRequest) { diff --git a/esapi/api.xpack.ml.stop_data_frame_analytics.go b/esapi/api.xpack.ml.stop_data_frame_analytics.go index a7aa00bc2f..58ab23d061 100644 --- a/esapi/api.xpack.ml.stop_data_frame_analytics.go +++ b/esapi/api.xpack.ml.stop_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.stop_datafeed.go b/esapi/api.xpack.ml.stop_datafeed.go index 0a51e2b5f0..07f131387f 100644 --- a/esapi/api.xpack.ml.stop_datafeed.go +++ b/esapi/api.xpack.ml.stop_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -56,10 +56,9 @@ type MLStopDatafeedRequest struct { DatafeedID string - AllowNoDatafeeds *bool - AllowNoMatch *bool - Force *bool - Timeout time.Duration + AllowNoMatch *bool + Force *bool + Timeout time.Duration Pretty bool Human bool @@ -108,10 +107,6 @@ func (r MLStopDatafeedRequest) Do(providedCtx context.Context, transport Transpo params = make(map[string]string) - if r.AllowNoDatafeeds != nil { - params["allow_no_datafeeds"] = strconv.FormatBool(*r.AllowNoDatafeeds) - } - if r.AllowNoMatch != nil { params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) } @@ -216,13 +211,6 @@ func (f MLStopDatafeed) WithBody(v io.Reader) func(*MLStopDatafeedRequest) { } } -// WithAllowNoDatafeeds - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). -func (f MLStopDatafeed) WithAllowNoDatafeeds(v bool) func(*MLStopDatafeedRequest) { - return func(r *MLStopDatafeedRequest) { - r.AllowNoDatafeeds = &v - } -} - // WithAllowNoMatch - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). func (f MLStopDatafeed) WithAllowNoMatch(v bool) func(*MLStopDatafeedRequest) { return func(r *MLStopDatafeedRequest) { diff --git a/esapi/api.xpack.ml.stop_trained_model_deployment.go b/esapi/api.xpack.ml.stop_trained_model_deployment.go index 4a25f46955..12aa127be1 100644 --- a/esapi/api.xpack.ml.stop_trained_model_deployment.go +++ b/esapi/api.xpack.ml.stop_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_data_frame_analytics.go b/esapi/api.xpack.ml.update_data_frame_analytics.go index 9a38e1a897..8a5410a396 100644 --- a/esapi/api.xpack.ml.update_data_frame_analytics.go +++ b/esapi/api.xpack.ml.update_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_datafeed.go b/esapi/api.xpack.ml.update_datafeed.go index 486c7dc1b7..ba47db7ed0 100644 --- a/esapi/api.xpack.ml.update_datafeed.go +++ b/esapi/api.xpack.ml.update_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_filter.go b/esapi/api.xpack.ml.update_filter.go index 90e3930452..d02604d2c4 100644 --- a/esapi/api.xpack.ml.update_filter.go +++ b/esapi/api.xpack.ml.update_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_job.go b/esapi/api.xpack.ml.update_job.go index 540f2145e3..33b1f76177 100644 --- a/esapi/api.xpack.ml.update_job.go +++ b/esapi/api.xpack.ml.update_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_model_snapshot.go b/esapi/api.xpack.ml.update_model_snapshot.go index ec5078297c..f7d7303f31 100644 --- a/esapi/api.xpack.ml.update_model_snapshot.go +++ b/esapi/api.xpack.ml.update_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_trained_model_deployment.go b/esapi/api.xpack.ml.update_trained_model_deployment.go index fb5345319f..1c375ecae1 100644 --- a/esapi/api.xpack.ml.update_trained_model_deployment.go +++ b/esapi/api.xpack.ml.update_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.upgrade_job_snapshot.go b/esapi/api.xpack.ml.upgrade_job_snapshot.go index 33b6a18933..690a0fe336 100644 --- a/esapi/api.xpack.ml.upgrade_job_snapshot.go +++ b/esapi/api.xpack.ml.upgrade_job_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.validate.go b/esapi/api.xpack.ml.validate.go index 851acb6790..934e29cc17 100644 --- a/esapi/api.xpack.ml.validate.go +++ b/esapi/api.xpack.ml.validate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.validate_detector.go b/esapi/api.xpack.ml.validate_detector.go index a74f8ffd1d..ab21d394d3 100644 --- a/esapi/api.xpack.ml.validate_detector.go +++ b/esapi/api.xpack.ml.validate_detector.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.monitoring.bulk.go b/esapi/api.xpack.monitoring.bulk.go index f80c59d3c7..36e12bc029 100644 --- a/esapi/api.xpack.monitoring.bulk.go +++ b/esapi/api.xpack.monitoring.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.open_point_in_time.go b/esapi/api.xpack.open_point_in_time.go index 210b811228..79091ae1d5 100644 --- a/esapi/api.xpack.open_point_in_time.go +++ b/esapi/api.xpack.open_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -56,11 +56,12 @@ type OpenPointInTimeRequest struct { Body io.Reader - ExpandWildcards string - IgnoreUnavailable *bool - KeepAlive string - Preference string - Routing string + AllowPartialSearchResults *bool + ExpandWildcards string + IgnoreUnavailable *bool + KeepAlive string + Preference string + Routing string Pretty bool Human bool @@ -109,6 +110,10 @@ func (r OpenPointInTimeRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + if r.ExpandWildcards != "" { params["expand_wildcards"] = r.ExpandWildcards } @@ -221,6 +226,13 @@ func (f OpenPointInTime) WithBody(v io.Reader) func(*OpenPointInTimeRequest) { } } +// WithAllowPartialSearchResults - specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception. (default: false). +func (f OpenPointInTime) WithAllowPartialSearchResults(v bool) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.AllowPartialSearchResults = &v + } +} + // WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. func (f OpenPointInTime) WithExpandWildcards(v string) func(*OpenPointInTimeRequest) { return func(r *OpenPointInTimeRequest) { diff --git a/esapi/api.xpack.profiling.flamegraph.go b/esapi/api.xpack.profiling.flamegraph.go index 7335792c01..3afee07e83 100644 --- a/esapi/api.xpack.profiling.flamegraph.go +++ b/esapi/api.xpack.profiling.flamegraph.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.delete_job.go b/esapi/api.xpack.rollup.delete_job.go index 2fa13e96e7..a8bb6dc3a2 100644 --- a/esapi/api.xpack.rollup.delete_job.go +++ b/esapi/api.xpack.rollup.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_jobs.go b/esapi/api.xpack.rollup.get_jobs.go index 42d0827dd4..9a3f5792b1 100644 --- a/esapi/api.xpack.rollup.get_jobs.go +++ b/esapi/api.xpack.rollup.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_rollup_caps.go b/esapi/api.xpack.rollup.get_rollup_caps.go index eeb2beba5f..ccaaacff51 100644 --- a/esapi/api.xpack.rollup.get_rollup_caps.go +++ b/esapi/api.xpack.rollup.get_rollup_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_rollup_index_caps.go b/esapi/api.xpack.rollup.get_rollup_index_caps.go index dc97b76eb1..637be55287 100644 --- a/esapi/api.xpack.rollup.get_rollup_index_caps.go +++ b/esapi/api.xpack.rollup.get_rollup_index_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.put_job.go b/esapi/api.xpack.rollup.put_job.go index 9ad730413d..7d8e4f75f4 100644 --- a/esapi/api.xpack.rollup.put_job.go +++ b/esapi/api.xpack.rollup.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.rollup_search.go b/esapi/api.xpack.rollup.rollup_search.go index 9d37d79b24..4a2616e014 100644 --- a/esapi/api.xpack.rollup.rollup_search.go +++ b/esapi/api.xpack.rollup.rollup_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.start_job.go b/esapi/api.xpack.rollup.start_job.go index bd28c9be20..3385151b8c 100644 --- a/esapi/api.xpack.rollup.start_job.go +++ b/esapi/api.xpack.rollup.start_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.stop_job.go b/esapi/api.xpack.rollup.stop_job.go index 540c069c4d..69fe025192 100644 --- a/esapi/api.xpack.rollup.stop_job.go +++ b/esapi/api.xpack.rollup.stop_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.cache_stats.go b/esapi/api.xpack.searchable_snapshots.cache_stats.go index b881ddce9a..c3a24c4857 100644 --- a/esapi/api.xpack.searchable_snapshots.cache_stats.go +++ b/esapi/api.xpack.searchable_snapshots.cache_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.clear_cache.go b/esapi/api.xpack.searchable_snapshots.clear_cache.go index cd0a69f875..4ab644520f 100644 --- a/esapi/api.xpack.searchable_snapshots.clear_cache.go +++ b/esapi/api.xpack.searchable_snapshots.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.mount.go b/esapi/api.xpack.searchable_snapshots.mount.go index 91aa50429a..9952cc9e74 100644 --- a/esapi/api.xpack.searchable_snapshots.mount.go +++ b/esapi/api.xpack.searchable_snapshots.mount.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.stats.go b/esapi/api.xpack.searchable_snapshots.stats.go index a2762f1e39..d579331885 100644 --- a/esapi/api.xpack.searchable_snapshots.stats.go +++ b/esapi/api.xpack.searchable_snapshots.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.activate_user_profile.go b/esapi/api.xpack.security.activate_user_profile.go index 5be9dc3254..8a3ae50eb3 100644 --- a/esapi/api.xpack.security.activate_user_profile.go +++ b/esapi/api.xpack.security.activate_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.authenticate.go b/esapi/api.xpack.security.authenticate.go index 1cf6539414..89e360003e 100644 --- a/esapi/api.xpack.security.authenticate.go +++ b/esapi/api.xpack.security.authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_delete_role.go b/esapi/api.xpack.security.bulk_delete_role.go index 3790b9b590..b8dd939c71 100644 --- a/esapi/api.xpack.security.bulk_delete_role.go +++ b/esapi/api.xpack.security.bulk_delete_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_put_role.go b/esapi/api.xpack.security.bulk_put_role.go index 5c5e4a5202..8980061fdc 100644 --- a/esapi/api.xpack.security.bulk_put_role.go +++ b/esapi/api.xpack.security.bulk_put_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_update_api_keys.go b/esapi/api.xpack.security.bulk_update_api_keys.go index 248e7f128f..d5aa5dfe56 100644 --- a/esapi/api.xpack.security.bulk_update_api_keys.go +++ b/esapi/api.xpack.security.bulk_update_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.change_password.go b/esapi/api.xpack.security.change_password.go index f2cab71623..69144eaaac 100644 --- a/esapi/api.xpack.security.change_password.go +++ b/esapi/api.xpack.security.change_password.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_api_key_cache.go b/esapi/api.xpack.security.clear_api_key_cache.go index 3fee069eda..d0aed336b4 100644 --- a/esapi/api.xpack.security.clear_api_key_cache.go +++ b/esapi/api.xpack.security.clear_api_key_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_privileges.go b/esapi/api.xpack.security.clear_cached_privileges.go index 5ed1a4778f..1ee7e27f5d 100644 --- a/esapi/api.xpack.security.clear_cached_privileges.go +++ b/esapi/api.xpack.security.clear_cached_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_realms.go b/esapi/api.xpack.security.clear_cached_realms.go index 506d038de6..be8a0f1416 100644 --- a/esapi/api.xpack.security.clear_cached_realms.go +++ b/esapi/api.xpack.security.clear_cached_realms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_roles.go b/esapi/api.xpack.security.clear_cached_roles.go index 9f1041738d..34eef68c44 100644 --- a/esapi/api.xpack.security.clear_cached_roles.go +++ b/esapi/api.xpack.security.clear_cached_roles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_service_tokens.go b/esapi/api.xpack.security.clear_cached_service_tokens.go index 3933616bbd..3c4168a8ce 100644 --- a/esapi/api.xpack.security.clear_cached_service_tokens.go +++ b/esapi/api.xpack.security.clear_cached_service_tokens.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_api_key.go b/esapi/api.xpack.security.create_api_key.go index 76f57e254b..7fcdfe6ffe 100644 --- a/esapi/api.xpack.security.create_api_key.go +++ b/esapi/api.xpack.security.create_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_cross_cluster_api_key.go b/esapi/api.xpack.security.create_cross_cluster_api_key.go index aca03ed95e..bfc60e9d81 100644 --- a/esapi/api.xpack.security.create_cross_cluster_api_key.go +++ b/esapi/api.xpack.security.create_cross_cluster_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_service_token.go b/esapi/api.xpack.security.create_service_token.go index 5ef29cf4f3..9848241bf9 100644 --- a/esapi/api.xpack.security.create_service_token.go +++ b/esapi/api.xpack.security.create_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delegate_pki.go b/esapi/api.xpack.security.delegate_pki.go new file mode 100644 index 0000000000..30ad9d0b1d --- /dev/null +++ b/esapi/api.xpack.security.delegate_pki.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.0.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityDelegatePkiFunc(t Transport) SecurityDelegatePki { + return func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) { + var r = SecurityDelegatePkiRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDelegatePki - Delegate PKI authentication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html. +type SecurityDelegatePki func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) + +// SecurityDelegatePkiRequest configures the Security Delegate Pki API request. +type SecurityDelegatePkiRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityDelegatePkiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/delegate_pki")) + path.WriteString("http://") + path.WriteString("/_security/delegate_pki") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + if instrument, ok := 
r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDelegatePki) WithContext(v context.Context) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDelegatePki) WithPretty() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDelegatePki) WithHuman() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDelegatePki) WithErrorTrace() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDelegatePki) WithFilterPath(v ...string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDelegatePki) WithHeader(h map[string]string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SecurityDelegatePki) WithOpaqueID(s string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.security.delete_privileges.go b/esapi/api.xpack.security.delete_privileges.go index fd6f199c5c..114aa15053 100644 --- a/esapi/api.xpack.security.delete_privileges.go +++ b/esapi/api.xpack.security.delete_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_role.go b/esapi/api.xpack.security.delete_role.go index e01f8930d6..9e973ac168 100644 --- a/esapi/api.xpack.security.delete_role.go +++ b/esapi/api.xpack.security.delete_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_role_mapping.go b/esapi/api.xpack.security.delete_role_mapping.go index 978423920f..35adad7278 100644 --- a/esapi/api.xpack.security.delete_role_mapping.go +++ b/esapi/api.xpack.security.delete_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_service_token.go b/esapi/api.xpack.security.delete_service_token.go index 55cf6e8ccb..e4bd054015 100644 --- a/esapi/api.xpack.security.delete_service_token.go +++ b/esapi/api.xpack.security.delete_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_user.go b/esapi/api.xpack.security.delete_user.go index 81a9bf39c0..d8985ade5f 100644 --- a/esapi/api.xpack.security.delete_user.go +++ b/esapi/api.xpack.security.delete_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.disable_user.go b/esapi/api.xpack.security.disable_user.go index a119a31d33..46b90e3407 100644 --- a/esapi/api.xpack.security.disable_user.go +++ b/esapi/api.xpack.security.disable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.disable_user_profile.go b/esapi/api.xpack.security.disable_user_profile.go index 04575c5df6..44f9801ea4 100644 --- a/esapi/api.xpack.security.disable_user_profile.go +++ b/esapi/api.xpack.security.disable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enable_user.go b/esapi/api.xpack.security.enable_user.go index ae798b8871..8ced3d7a22 100644 --- a/esapi/api.xpack.security.enable_user.go +++ b/esapi/api.xpack.security.enable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enable_user_profile.go b/esapi/api.xpack.security.enable_user_profile.go index 3f10e39a5e..58a0abe4f4 100644 --- a/esapi/api.xpack.security.enable_user_profile.go +++ b/esapi/api.xpack.security.enable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enroll_kibana.go b/esapi/api.xpack.security.enroll_kibana.go index b65c1f7477..3c7762112a 100644 --- a/esapi/api.xpack.security.enroll_kibana.go +++ b/esapi/api.xpack.security.enroll_kibana.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enroll_node.go b/esapi/api.xpack.security.enroll_node.go index 50e3962d03..bc3f49adee 100644 --- a/esapi/api.xpack.security.enroll_node.go +++ b/esapi/api.xpack.security.enroll_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_api_key.go b/esapi/api.xpack.security.get_api_key.go index c1fa14e21a..863dfd8587 100644 --- a/esapi/api.xpack.security.get_api_key.go +++ b/esapi/api.xpack.security.get_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_builtin_privileges.go b/esapi/api.xpack.security.get_builtin_privileges.go index abdb3b39bb..4c39e44a5c 100644 --- a/esapi/api.xpack.security.get_builtin_privileges.go +++ b/esapi/api.xpack.security.get_builtin_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_privileges.go b/esapi/api.xpack.security.get_privileges.go index fa21bccf53..de188538cc 100644 --- a/esapi/api.xpack.security.get_privileges.go +++ b/esapi/api.xpack.security.get_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_role.go b/esapi/api.xpack.security.get_role.go index 4b4d0fe91d..70f1bee077 100644 --- a/esapi/api.xpack.security.get_role.go +++ b/esapi/api.xpack.security.get_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_role_mapping.go b/esapi/api.xpack.security.get_role_mapping.go index b89c0e6dfc..35c948c19c 100644 --- a/esapi/api.xpack.security.get_role_mapping.go +++ b/esapi/api.xpack.security.get_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_service_accounts.go b/esapi/api.xpack.security.get_service_accounts.go index e125bf767d..0a127198e2 100644 --- a/esapi/api.xpack.security.get_service_accounts.go +++ b/esapi/api.xpack.security.get_service_accounts.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_service_credentials.go b/esapi/api.xpack.security.get_service_credentials.go index 11e54f96cd..1bc7e659a9 100644 --- a/esapi/api.xpack.security.get_service_credentials.go +++ b/esapi/api.xpack.security.get_service_credentials.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_settings.go b/esapi/api.xpack.security.get_settings.go index 16cdde250e..8aa603b809 100644 --- a/esapi/api.xpack.security.get_settings.go +++ b/esapi/api.xpack.security.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_token.go b/esapi/api.xpack.security.get_token.go index a7581472ad..5555c7ebc0 100644 --- a/esapi/api.xpack.security.get_token.go +++ b/esapi/api.xpack.security.get_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user.go b/esapi/api.xpack.security.get_user.go index 71b11ee7f7..301c5181da 100644 --- a/esapi/api.xpack.security.get_user.go +++ b/esapi/api.xpack.security.get_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user_privileges.go b/esapi/api.xpack.security.get_user_privileges.go index f96994ab9a..f5780e0840 100644 --- a/esapi/api.xpack.security.get_user_privileges.go +++ b/esapi/api.xpack.security.get_user_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user_profile.go b/esapi/api.xpack.security.get_user_profile.go index cfd7be6ec3..866ac73a29 100644 --- a/esapi/api.xpack.security.get_user_profile.go +++ b/esapi/api.xpack.security.get_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.grant_api_key.go b/esapi/api.xpack.security.grant_api_key.go index 7f64353aff..46e74f6cb1 100644 --- a/esapi/api.xpack.security.grant_api_key.go +++ b/esapi/api.xpack.security.grant_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.has_privileges.go b/esapi/api.xpack.security.has_privileges.go index f460ab1c91..d906468aea 100644 --- a/esapi/api.xpack.security.has_privileges.go +++ b/esapi/api.xpack.security.has_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.has_privileges_user_profile.go b/esapi/api.xpack.security.has_privileges_user_profile.go index bc073c75e7..e2dc51162e 100644 --- a/esapi/api.xpack.security.has_privileges_user_profile.go +++ b/esapi/api.xpack.security.has_privileges_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.invalidate_api_key.go b/esapi/api.xpack.security.invalidate_api_key.go index 7d30c1aac1..4090918c49 100644 --- a/esapi/api.xpack.security.invalidate_api_key.go +++ b/esapi/api.xpack.security.invalidate_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.invalidate_token.go b/esapi/api.xpack.security.invalidate_token.go index 166ffb84d4..003ebb5a08 100644 --- a/esapi/api.xpack.security.invalidate_token.go +++ b/esapi/api.xpack.security.invalidate_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_authenticate.go b/esapi/api.xpack.security.oidc_authenticate.go index 3c60956280..85b9cbfbf5 100644 --- a/esapi/api.xpack.security.oidc_authenticate.go +++ b/esapi/api.xpack.security.oidc_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_logout.go b/esapi/api.xpack.security.oidc_logout.go index 9e9ea8e5f9..1652899337 100644 --- a/esapi/api.xpack.security.oidc_logout.go +++ b/esapi/api.xpack.security.oidc_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_prepare_authentication.go b/esapi/api.xpack.security.oidc_prepare_authentication.go index 2fc9c971b1..6b244d0637 100644 --- a/esapi/api.xpack.security.oidc_prepare_authentication.go +++ b/esapi/api.xpack.security.oidc_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_privileges.go b/esapi/api.xpack.security.put_privileges.go index 85be74ceb9..c8c4d6e393 100644 --- a/esapi/api.xpack.security.put_privileges.go +++ b/esapi/api.xpack.security.put_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_role.go b/esapi/api.xpack.security.put_role.go index 0b1da344de..6989542aa5 100644 --- a/esapi/api.xpack.security.put_role.go +++ b/esapi/api.xpack.security.put_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_role_mapping.go b/esapi/api.xpack.security.put_role_mapping.go index 798367f4ba..cd99366488 100644 --- a/esapi/api.xpack.security.put_role_mapping.go +++ b/esapi/api.xpack.security.put_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_user.go b/esapi/api.xpack.security.put_user.go index bd2f770c1d..cf5299a201 100644 --- a/esapi/api.xpack.security.put_user.go +++ b/esapi/api.xpack.security.put_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_api_keys.go b/esapi/api.xpack.security.query_api_keys.go index 0608a3459d..dae052a0a7 100644 --- a/esapi/api.xpack.security.query_api_keys.go +++ b/esapi/api.xpack.security.query_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_role.go b/esapi/api.xpack.security.query_role.go index dbeb9d5f50..e1f09510f9 100644 --- a/esapi/api.xpack.security.query_role.go +++ b/esapi/api.xpack.security.query_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_user.go b/esapi/api.xpack.security.query_user.go index 4f91b12eeb..a3999d4ffa 100644 --- a/esapi/api.xpack.security.query_user.go +++ b/esapi/api.xpack.security.query_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_authenticate.go b/esapi/api.xpack.security.saml_authenticate.go index 39323896e9..e41c8f4608 100644 --- a/esapi/api.xpack.security.saml_authenticate.go +++ b/esapi/api.xpack.security.saml_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_complete_logout.go b/esapi/api.xpack.security.saml_complete_logout.go index 41e2771016..812caf27ca 100644 --- a/esapi/api.xpack.security.saml_complete_logout.go +++ b/esapi/api.xpack.security.saml_complete_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_invalidate.go b/esapi/api.xpack.security.saml_invalidate.go index d6ec95b008..a1a2225a97 100644 --- a/esapi/api.xpack.security.saml_invalidate.go +++ b/esapi/api.xpack.security.saml_invalidate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_logout.go b/esapi/api.xpack.security.saml_logout.go index 4df212a621..c12ea2d861 100644 --- a/esapi/api.xpack.security.saml_logout.go +++ b/esapi/api.xpack.security.saml_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_prepare_authentication.go b/esapi/api.xpack.security.saml_prepare_authentication.go index c3eaad279c..ffcd832884 100644 --- a/esapi/api.xpack.security.saml_prepare_authentication.go +++ b/esapi/api.xpack.security.saml_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_service_provider_metadata.go b/esapi/api.xpack.security.saml_service_provider_metadata.go index 1027e6c23f..1fad4330ac 100644 --- a/esapi/api.xpack.security.saml_service_provider_metadata.go +++ b/esapi/api.xpack.security.saml_service_provider_metadata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.suggest_user_profiles.go b/esapi/api.xpack.security.suggest_user_profiles.go index ab8b23821a..4ba8cfdeef 100644 --- a/esapi/api.xpack.security.suggest_user_profiles.go +++ b/esapi/api.xpack.security.suggest_user_profiles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_api_key.go b/esapi/api.xpack.security.update_api_key.go index fec4bd5730..bf9e42d838 100644 --- a/esapi/api.xpack.security.update_api_key.go +++ b/esapi/api.xpack.security.update_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_cross_cluster_api_key.go b/esapi/api.xpack.security.update_cross_cluster_api_key.go index 6e0e73967f..8d9adeed70 100644 --- a/esapi/api.xpack.security.update_cross_cluster_api_key.go +++ b/esapi/api.xpack.security.update_cross_cluster_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_settings.go b/esapi/api.xpack.security.update_settings.go index 041803e1bd..48d72daa58 100644 --- a/esapi/api.xpack.security.update_settings.go +++ b/esapi/api.xpack.security.update_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_user_profile_data.go b/esapi/api.xpack.security.update_user_profile_data.go index 2fba8f349e..e5718c342f 100644 --- a/esapi/api.xpack.security.update_user_profile_data.go +++ b/esapi/api.xpack.security.update_user_profile_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.slm.delete_lifecycle.go b/esapi/api.xpack.slm.delete_lifecycle.go index 7e05d77c86..8ece2d6c7b 100644 --- a/esapi/api.xpack.slm.delete_lifecycle.go +++ b/esapi/api.xpack.slm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmDeleteLifecycleFunc(t Transport) SlmDeleteLifecycle { @@ -51,6 +52,9 @@ type SlmDeleteLifecycle func(policy_id string, o ...func(*SlmDeleteLifecycleRequ type SlmDeleteLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -96,6 +100,14 @@ func (r SlmDeleteLifecycleRequest) Do(providedCtx context.Context, transport Tra params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +186,20 @@ func (f SlmDeleteLifecycle) WithContext(v context.Context) func(*SlmDeleteLifecy } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmDeleteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmDeleteLifecycle) WithTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f SlmDeleteLifecycle) WithPretty() func(*SlmDeleteLifecycleRequest) { return func(r *SlmDeleteLifecycleRequest) { diff --git a/esapi/api.xpack.slm.execute_lifecycle.go b/esapi/api.xpack.slm.execute_lifecycle.go index d18186a1f5..56c9bae23d 100644 --- a/esapi/api.xpack.slm.execute_lifecycle.go +++ b/esapi/api.xpack.slm.execute_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmExecuteLifecycleFunc(t Transport) SlmExecuteLifecycle { @@ -51,6 +52,9 @@ type SlmExecuteLifecycle func(policy_id string, o ...func(*SlmExecuteLifecycleRe type SlmExecuteLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -98,6 +102,14 @@ func (r SlmExecuteLifecycleRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -176,6 +188,20 @@ func (f SlmExecuteLifecycle) WithContext(v context.Context) func(*SlmExecuteLife } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmExecuteLifecycle) WithTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f SlmExecuteLifecycle) WithPretty() func(*SlmExecuteLifecycleRequest) { return func(r *SlmExecuteLifecycleRequest) { diff --git a/esapi/api.xpack.slm.execute_retention.go b/esapi/api.xpack.slm.execute_retention.go index 3007d49f8a..a3eddad328 100644 --- a/esapi/api.xpack.slm.execute_retention.go +++ b/esapi/api.xpack.slm.execute_retention.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmExecuteRetentionFunc(t Transport) SlmExecuteRetention { @@ -49,6 +50,9 @@ type SlmExecuteRetention func(o ...func(*SlmExecuteRetentionRequest)) (*Response // SlmExecuteRetentionRequest configures the Slm Execute Retention API request. type SlmExecuteRetentionRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmExecuteRetentionRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmExecuteRetention) WithContext(v context.Context) func(*SlmExecuteRete } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteRetention) WithMasterTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmExecuteRetention) WithTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmExecuteRetention) WithPretty() func(*SlmExecuteRetentionRequest) { return func(r *SlmExecuteRetentionRequest) { diff --git a/esapi/api.xpack.slm.get_lifecycle.go b/esapi/api.xpack.slm.get_lifecycle.go index 842e455fcc..e40614e67f 100644 --- a/esapi/api.xpack.slm.get_lifecycle.go +++ b/esapi/api.xpack.slm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetLifecycleFunc(t Transport) SlmGetLifecycle { @@ -51,6 +52,9 @@ type SlmGetLifecycle func(o ...func(*SlmGetLifecycleRequest)) (*Response, error) type SlmGetLifecycleRequest struct { PolicyID []string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -98,6 +102,14 @@ func (r SlmGetLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -183,6 +195,20 @@ func (f SlmGetLifecycle) WithPolicyID(v ...string) func(*SlmGetLifecycleRequest) } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetLifecycle) WithMasterTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetLifecycle) WithTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetLifecycle) WithPretty() func(*SlmGetLifecycleRequest) { return func(r *SlmGetLifecycleRequest) { diff --git a/esapi/api.xpack.slm.get_stats.go b/esapi/api.xpack.slm.get_stats.go index 962e26afd3..ecac0f3d7a 100644 --- a/esapi/api.xpack.slm.get_stats.go +++ b/esapi/api.xpack.slm.get_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetStatsFunc(t Transport) SlmGetStats { @@ -49,6 +50,9 @@ type SlmGetStats func(o ...func(*SlmGetStatsRequest)) (*Response, error) // SlmGetStatsRequest configures the Slm Get Stats API request. type SlmGetStatsRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmGetStatsRequest) Do(providedCtx context.Context, transport Transport) params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmGetStats) WithContext(v context.Context) func(*SlmGetStatsRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStats) WithMasterTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetStats) WithTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetStats) WithPretty() func(*SlmGetStatsRequest) { return func(r *SlmGetStatsRequest) { diff --git a/esapi/api.xpack.slm.get_status.go b/esapi/api.xpack.slm.get_status.go index 7920546515..d211fd76dc 100644 --- a/esapi/api.xpack.slm.get_status.go +++ b/esapi/api.xpack.slm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetStatusFunc(t Transport) SlmGetStatus { @@ -49,6 +50,9 @@ type SlmGetStatus func(o ...func(*SlmGetStatusRequest)) (*Response, error) // SlmGetStatusRequest configures the Slm Get Status API request. type SlmGetStatusRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmGetStatusRequest) Do(providedCtx context.Context, transport Transport params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmGetStatus) WithContext(v context.Context) func(*SlmGetStatusRequest) } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStatus) WithMasterTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetStatus) WithTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetStatus) WithPretty() func(*SlmGetStatusRequest) { return func(r *SlmGetStatusRequest) { diff --git a/esapi/api.xpack.slm.put_lifecycle.go b/esapi/api.xpack.slm.put_lifecycle.go index 0ed61d6aee..98049cae25 100644 --- a/esapi/api.xpack.slm.put_lifecycle.go +++ b/esapi/api.xpack.slm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newSlmPutLifecycleFunc(t Transport) SlmPutLifecycle { @@ -54,6 +55,9 @@ type SlmPutLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -99,6 +103,14 @@ func (r SlmPutLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -191,6 +203,20 @@ func (f SlmPutLifecycle) WithBody(v io.Reader) func(*SlmPutLifecycleRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmPutLifecycle) WithMasterTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmPutLifecycle) WithTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmPutLifecycle) WithPretty() func(*SlmPutLifecycleRequest) { return func(r *SlmPutLifecycleRequest) { diff --git a/esapi/api.xpack.slm.start.go b/esapi/api.xpack.slm.start.go index 09152462eb..10fc774b04 100644 --- a/esapi/api.xpack.slm.start.go +++ b/esapi/api.xpack.slm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.slm.stop.go b/esapi/api.xpack.slm.stop.go index 3081996977..76508581b4 100644 --- a/esapi/api.xpack.slm.stop.go +++ b/esapi/api.xpack.slm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.clear_cursor.go b/esapi/api.xpack.sql.clear_cursor.go index b9cde5ee1e..2994f995e2 100644 --- a/esapi/api.xpack.sql.clear_cursor.go +++ b/esapi/api.xpack.sql.clear_cursor.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.delete_async.go b/esapi/api.xpack.sql.delete_async.go index d8ddf80acb..0a96a83f46 100644 --- a/esapi/api.xpack.sql.delete_async.go +++ b/esapi/api.xpack.sql.delete_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.get_async.go b/esapi/api.xpack.sql.get_async.go index c6629044d8..70bfd07cbc 100644 --- a/esapi/api.xpack.sql.get_async.go +++ b/esapi/api.xpack.sql.get_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.get_async_status.go b/esapi/api.xpack.sql.get_async_status.go index afb0d33885..ab41f7d735 100644 --- a/esapi/api.xpack.sql.get_async_status.go +++ b/esapi/api.xpack.sql.get_async_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.query.go b/esapi/api.xpack.sql.query.go index 3c95c0360c..47227819b8 100644 --- a/esapi/api.xpack.sql.query.go +++ b/esapi/api.xpack.sql.query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.translate.go b/esapi/api.xpack.sql.translate.go index 9370a8704e..676792cbb4 100644 --- a/esapi/api.xpack.sql.translate.go +++ b/esapi/api.xpack.sql.translate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ssl.certificates.go b/esapi/api.xpack.ssl.certificates.go index 30abe1ded3..014a712314 100644 --- a/esapi/api.xpack.ssl.certificates.go +++ b/esapi/api.xpack.ssl.certificates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.find_field_structure.go b/esapi/api.xpack.text_structure.find_field_structure.go index 1cf9d124b1..b4e2737d7f 100644 --- a/esapi/api.xpack.text_structure.find_field_structure.go +++ b/esapi/api.xpack.text_structure.find_field_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi @@ -47,7 +47,7 @@ func newTextStructureFindFieldStructureFunc(t Transport) TextStructureFindFieldS // TextStructureFindFieldStructure - Finds the structure of a text field in an index. // // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html. -type TextStructureFindFieldStructure func(index string, field string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) +type TextStructureFindFieldStructure func(field string, index string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) // TextStructureFindFieldStructureRequest configures the Text Structure Find Field Structure API request. 
type TextStructureFindFieldStructureRequest struct { diff --git a/esapi/api.xpack.text_structure.find_message_structure.go b/esapi/api.xpack.text_structure.find_message_structure.go index 2226a60e3d..3ee1c67374 100644 --- a/esapi/api.xpack.text_structure.find_message_structure.go +++ b/esapi/api.xpack.text_structure.find_message_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.find_structure.go b/esapi/api.xpack.text_structure.find_structure.go index 12610d9fd1..0ffe754740 100644 --- a/esapi/api.xpack.text_structure.find_structure.go +++ b/esapi/api.xpack.text_structure.find_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.test_grok_pattern.go b/esapi/api.xpack.text_structure.test_grok_pattern.go index 0110dfa0eb..c62754e1c9 100644 --- a/esapi/api.xpack.text_structure.test_grok_pattern.go +++ b/esapi/api.xpack.text_structure.test_grok_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.delete_transform.go b/esapi/api.xpack.transform.delete_transform.go index 57b66632b6..7ec39c0b60 100644 --- a/esapi/api.xpack.transform.delete_transform.go +++ b/esapi/api.xpack.transform.delete_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_node_stats.go b/esapi/api.xpack.transform.get_node_stats.go index 3e646cfbd3..bc099dcb4b 100644 --- a/esapi/api.xpack.transform.get_node_stats.go +++ b/esapi/api.xpack.transform.get_node_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_transform.go b/esapi/api.xpack.transform.get_transform.go index e37c22f26c..a51b895be2 100644 --- a/esapi/api.xpack.transform.get_transform.go +++ b/esapi/api.xpack.transform.get_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_transform_stats.go b/esapi/api.xpack.transform.get_transform_stats.go index 378ec6c159..799e6bab9b 100644 --- a/esapi/api.xpack.transform.get_transform_stats.go +++ b/esapi/api.xpack.transform.get_transform_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.preview_transform.go b/esapi/api.xpack.transform.preview_transform.go index d53d6ad745..c8b0a4531f 100644 --- a/esapi/api.xpack.transform.preview_transform.go +++ b/esapi/api.xpack.transform.preview_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.put_transform.go b/esapi/api.xpack.transform.put_transform.go index e66b3994fc..d429573427 100644 --- a/esapi/api.xpack.transform.put_transform.go +++ b/esapi/api.xpack.transform.put_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.reset_transform.go b/esapi/api.xpack.transform.reset_transform.go index 61db94fe58..5c17688f10 100644 --- a/esapi/api.xpack.transform.reset_transform.go +++ b/esapi/api.xpack.transform.reset_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.schedule_now_transform.go b/esapi/api.xpack.transform.schedule_now_transform.go index a6a39e47d3..68d97fde91 100644 --- a/esapi/api.xpack.transform.schedule_now_transform.go +++ b/esapi/api.xpack.transform.schedule_now_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.start_transform.go b/esapi/api.xpack.transform.start_transform.go index 441ced3b94..d72c9378be 100644 --- a/esapi/api.xpack.transform.start_transform.go +++ b/esapi/api.xpack.transform.start_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.stop_transform.go b/esapi/api.xpack.transform.stop_transform.go index 02816a03be..51a9c10cf6 100644 --- a/esapi/api.xpack.transform.stop_transform.go +++ b/esapi/api.xpack.transform.stop_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.update_transform.go b/esapi/api.xpack.transform.update_transform.go index 56916b5b46..7f09703999 100644 --- a/esapi/api.xpack.transform.update_transform.go +++ b/esapi/api.xpack.transform.update_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.upgrade_transforms.go b/esapi/api.xpack.transform.upgrade_transforms.go index 4a409c7f25..43916a2a74 100644 --- a/esapi/api.xpack.transform.upgrade_transforms.go +++ b/esapi/api.xpack.transform.upgrade_transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.ack_watch.go b/esapi/api.xpack.watcher.ack_watch.go index 5368d8816a..49af6dc894 100644 --- a/esapi/api.xpack.watcher.ack_watch.go +++ b/esapi/api.xpack.watcher.ack_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.activate_watch.go b/esapi/api.xpack.watcher.activate_watch.go index 7895dab8c0..58ad090fb8 100644 --- a/esapi/api.xpack.watcher.activate_watch.go +++ b/esapi/api.xpack.watcher.activate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.deactivate_watch.go b/esapi/api.xpack.watcher.deactivate_watch.go index a2ce74fc75..30409d4451 100644 --- a/esapi/api.xpack.watcher.deactivate_watch.go +++ b/esapi/api.xpack.watcher.deactivate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.delete_watch.go b/esapi/api.xpack.watcher.delete_watch.go index 6f51851f7c..1bc17e659a 100644 --- a/esapi/api.xpack.watcher.delete_watch.go +++ b/esapi/api.xpack.watcher.delete_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.execute_watch.go b/esapi/api.xpack.watcher.execute_watch.go index 13487baab2..a0e9a26f19 100644 --- a/esapi/api.xpack.watcher.execute_watch.go +++ b/esapi/api.xpack.watcher.execute_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.get_settings.go b/esapi/api.xpack.watcher.get_settings.go index 5504455e1b..c82ced233d 100644 --- a/esapi/api.xpack.watcher.get_settings.go +++ b/esapi/api.xpack.watcher.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.get_watch.go b/esapi/api.xpack.watcher.get_watch.go index 0352f2c0e6..70adb67dec 100644 --- a/esapi/api.xpack.watcher.get_watch.go +++ b/esapi/api.xpack.watcher.get_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.put_watch.go b/esapi/api.xpack.watcher.put_watch.go index d4cacf7395..405e61ccb8 100644 --- a/esapi/api.xpack.watcher.put_watch.go +++ b/esapi/api.xpack.watcher.put_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.query_watches.go b/esapi/api.xpack.watcher.query_watches.go index 5808d4e743..64acae65e2 100644 --- a/esapi/api.xpack.watcher.query_watches.go +++ b/esapi/api.xpack.watcher.query_watches.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.start.go b/esapi/api.xpack.watcher.start.go index 277a484c9a..bcee0234f7 100644 --- a/esapi/api.xpack.watcher.start.go +++ b/esapi/api.xpack.watcher.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.stats.go b/esapi/api.xpack.watcher.stats.go index 6850110ea1..2e654c8097 100644 --- a/esapi/api.xpack.watcher.stats.go +++ b/esapi/api.xpack.watcher.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.stop.go b/esapi/api.xpack.watcher.stop.go index d6ca0dc919..9ec6730942 100644 --- a/esapi/api.xpack.watcher.stop.go +++ b/esapi/api.xpack.watcher.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.update_settings.go b/esapi/api.xpack.watcher.update_settings.go index c5ec219b2b..f40d333202 100644 --- a/esapi/api.xpack.watcher.update_settings.go +++ b/esapi/api.xpack.watcher.update_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.xpack.info.go b/esapi/api.xpack.xpack.info.go index f44342be90..44693f6743 100644 --- a/esapi/api.xpack.xpack.info.go +++ b/esapi/api.xpack.xpack.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.xpack.usage.go b/esapi/api.xpack.xpack.usage.go index f6943cd208..3d3bf068f2 100644 --- a/esapi/api.xpack.xpack.usage.go +++ b/esapi/api.xpack.xpack.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 9.0.0: DO NOT EDIT package esapi diff --git a/esapi/test/go.mod b/esapi/test/go.mod index a12b3bc039..8b569f2176 100644 --- a/esapi/test/go.mod +++ b/esapi/test/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../../ require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20210817150010-57d659deaca7 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/internal/build/cmd/generate/commands/gensource/model.go b/internal/build/cmd/generate/commands/gensource/model.go index 38e25f75c7..e827f12e12 100644 --- a/internal/build/cmd/generate/commands/gensource/model.go +++ b/internal/build/cmd/generate/commands/gensource/model.go @@ -43,7 +43,6 @@ func init() { } // NewEndpoint creates a new API endpoint. 
-// func NewEndpoint(f io.Reader) (*Endpoint, error) { var endpoint Endpoint var spec map[string]Endpoint @@ -101,6 +100,20 @@ func NewEndpoint(f io.Reader) (*Endpoint, error) { endpoint.URL.Params = endpoint.Params } + // These are implemented statically. + paramSkipList := map[string]bool{ + "human": true, + "pretty": true, + "error_trace": true, + "filter_path": true, + } + for name, _ := range endpoint.Params { + // remove from endpoint if it's in the skip list + if _, ok := paramSkipList[name]; ok { + delete(endpoint.Params, name) + } + } + if fpath, ok := f.(*os.File); ok { if strings.Contains(fpath.Name(), "x-pack") { endpoint.Type = "xpack" @@ -221,7 +234,6 @@ func NewEndpoint(f io.Reader) (*Endpoint, error) { } // Endpoint represents an API endpoint. -// type Endpoint struct { Name string `json:"-"` Type string `json:"-"` @@ -239,7 +251,6 @@ type Endpoint struct { } // URL represents API endpoint URL. -// type URL struct { Endpoint *Endpoint `json:"-"` @@ -268,7 +279,6 @@ type Path struct { } // Part represents part of the API endpoint URL. -// type Part struct { Endpoint *Endpoint `json:"-"` @@ -283,7 +293,6 @@ type Part struct { } // Param represents API endpoint parameter. -// type Param struct { Endpoint *Endpoint `json:"-"` @@ -297,7 +306,6 @@ type Param struct { } // Body represents API endpoint body. -// type Body struct { Endpoint *Endpoint `json:"-"` @@ -307,7 +315,6 @@ type Body struct { } // MethodArgument represents a method argument for API endpoint. -// type MethodArgument struct { Endpoint *Endpoint @@ -320,14 +327,12 @@ type MethodArgument struct { } // Namespace returns the API endpoint namespace. -// func (e *Endpoint) Namespace() string { ep := strings.Split(e.Name, ".") return utils.NameToGo(ep[0]) } // MethodName returns the API endpoint method name. -// func (e *Endpoint) MethodName() string { ep := strings.Split(e.Name, ".") ep = append(ep[:0], ep[1:]...) 
@@ -344,13 +349,11 @@ func (e *Endpoint) MethodName() string { } // MethodWithNamespace returns the API endpoint method name with namespace. -// func (e *Endpoint) MethodWithNamespace() string { return utils.APIToGo(e.Name) } // HumanMethodWithNamespace returns the API endpoint method name in humanized form. -// func (e *Endpoint) HumanMethodWithNamespace() string { var ( src = e.MethodWithNamespace() @@ -371,7 +374,6 @@ func (e *Endpoint) HumanMethodWithNamespace() string { } // RequiredArguments return the list of required method arguments. -// func (e *Endpoint) RequiredArguments() []MethodArgument { var args = make([]MethodArgument, 0) var prominentArgs = []string{ @@ -468,7 +470,6 @@ func (e *Endpoint) RequiredArguments() []MethodArgument { } // GoName returns a Go name for part. -// func (p *Part) GoName() string { switch { case p.Name == "context": @@ -479,13 +480,11 @@ func (p *Part) GoName() string { } // GoType returns a Go type for part. -// func (p *Part) GoType(comment ...bool) string { return utils.TypeToGo(p.Type) } // GoName returns a Go name for parameter. -// func (p *Param) GoName() string { switch { case p.Name == "context": @@ -498,7 +497,6 @@ func (p *Param) GoName() string { } // GoType returns a Go type for parameter. -// func (p *Param) GoType(comment ...bool) string { if f := (&Generator{Endpoint: p.Endpoint}).GetOverride("polymorphic-param", p.Endpoint.Name); f != nil { if out := f(p.Endpoint, p.Name); out != "" { @@ -509,13 +507,11 @@ func (p *Param) GoType(comment ...bool) string { } // GoName returns a Go name for method argument. -// func (p *MethodArgument) GoName() string { return utils.NameToGo(p.Name, p.Endpoint.MethodWithNamespace()) } // GoType returns a Go type for method argument. 
-// func (p *MethodArgument) GoType(comment ...bool) string { return utils.TypeToGo(p.Type) } diff --git a/internal/build/cmd/generate/commands/gentests/skips.go b/internal/build/cmd/generate/commands/gentests/skips.go index c5b9255d27..141d0aacb2 100644 --- a/internal/build/cmd/generate/commands/gentests/skips.go +++ b/internal/build/cmd/generate/commands/gentests/skips.go @@ -67,7 +67,15 @@ var skipFiles = []string{ ".*esql\\/.*.yml", "deprecation/10_basic.yml", // incompatible test generation "search/520_fetch_fields.yml", // disabled for inconsistency + "search.vectors/90_sparse_vector.yml", + "indices.create/21_synthetic_source_stored.yml", + "indices.create/20_synthetic_source.yml", + "indices.recovery/20_synthetic_source.yml", "ingest_geoip/20_geoip_processor.yml", + "range/20_synthetic_source.yml", + "search/600_flattened_ignore_above.yml", + "search/540_ignore_above_synthetic_source.yml", + "update/100_synthetic_source.yml", } // TODO: Comments into descriptions for `Skip()` @@ -465,6 +473,7 @@ nodes.stats/11_indices_metrics.yml: - Metric - blank for indices shards - Metric - _all for indices shards - indices shards total count test + - indices mappings exact count test for indices level - Lucene segment level fields stats data_stream/10_data_stream_resolvability.yml: @@ -530,12 +539,22 @@ get/100_synthetic_source.yml: - non-indexed dense vectors - fields with ignore_malformed - flattened field with ignore_above + - fetch without refresh also produces synthetic source + - doc values keyword with ignore_above + - stored keyword with ignore_above + - flattened field + - flattened field with ignore_above and arrays indices.stats/70_write_load.yml: - Write load average is tracked at shard level search/400_synthetic_source.yml: - stored keyword without sibling fields + - doc values keyword with ignore_above + - stored keyword with ignore_above + +search/140_pre_filter_search_shards.yml: + - pre_filter_shard_size with shards that have no hit health/10_usage.yml: 
- Usage stats on the health API @@ -562,6 +581,7 @@ logsdb/10_settings.yml: - override sort order settings - override sort missing settings - override sort mode settings + - default ignore dynamic beyond limit and default sorting with hostname # expects map, got nil search/520_fetch_fields.yml: @@ -571,29 +591,46 @@ search/520_fetch_fields.yml: spatial/140_synthetic_source.yml: - point -search.suggest/20_phrase.yml: - - breaks ties by sorting terms +analysis-common/40_token_filters.yml: + - stemmer_override file access -search/600_flattened_ignore_above.yml: - - flattened ignore_above single-value field +cluster.stats/30_ccs_stats.yml: + - cross-cluster search stats search -ingest_geoip/50_ip_lookup_processor.yml: - - Test ip_location processor with defaults +cluster.stats/40_source_modes.yml: + - test source modes -ingest_geoip/60_ip_location_databases.yml: - - Test adding, getting, and removing ip location databases +index/92_metrics_auto_subobjects.yml: + - Metrics object indexing with synthetic source -ingest_geoip/30_geoip_stats.yml: - - Test geoip stats +index/91_metrics_no_subobjects.yml: + - Metrics object indexing with synthetic source ingest_geoip/40_geoip_databases.yml: - Test adding, getting, and removing geoip databases -indices.create/20_synthetic_source.yml: - - synthetic_source with disabled doc_values - - synthetic_source with copy_to - - disabled object contains array +ingest_geoip/30_geoip_stats.yml: + - Test geoip stats -cluster.stats/30_ccs_stats.yml: - - cross-cluster search stats search +ingest_geoip/60_ip_location_databases.yml: + - Test adding, getting, and removing ip location databases + +ingest_geoip/50_ip_lookup_processor.yml: + - Test ip_location processor with defaults + +logsdb/20_source_mapping.yml: + - synthetic _source is default + +search.suggest/20_phrase.yml: + - breaks ties by sorting terms + +migrate/30_create_from.yml: + - Test create_from with remove_index_blocks default of true + +tsdb/25_id_generation.yml: + - delete over 
_bulk + +tsdb/90_unsupported_operations.yml: + - index with routing over _bulk + - update over _bulk ` diff --git a/internal/build/go.sum b/internal/build/go.sum index b52605bbac..ab37d788ad 100644 --- a/internal/build/go.sum +++ b/internal/build/go.sum @@ -6,6 +6,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -20,22 +22,14 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/internal/version/version.go b/internal/version/version.go index 1adc3470fd..502415de9c 100644 --- a/internal/version/version.go +++ b/internal/version/version.go @@ -18,4 +18,4 @@ package version // Client returns the client version as a string. -const Client = "8.16.0-SNAPSHOT" +const Client = "9.0.0-SNAPSHOT" diff --git a/typedapi/api._.go b/typedapi/api._.go index caa987cb30..34b284a2ed 100644 --- a/typedapi/api._.go +++ b/typedapi/api._.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package typedapi @@ -95,14 +95,19 @@ import ( connector_put "github.com/elastic/go-elasticsearch/v8/typedapi/connector/put" connector_secret_post "github.com/elastic/go-elasticsearch/v8/typedapi/connector/secretpost" connector_sync_job_cancel "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobcancel" + connector_sync_job_check_in "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobcheckin" + connector_sync_job_claim "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobclaim" connector_sync_job_delete "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobdelete" + connector_sync_job_error "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjoberror" connector_sync_job_get "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobget" connector_sync_job_list "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjoblist" connector_sync_job_post "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobpost" + connector_sync_job_update_stats "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobupdatestats" connector_update_active_filtering "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateactivefiltering" connector_update_api_key_id "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateapikeyid" connector_update_configuration "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateconfiguration" connector_update_error "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateerror" + connector_update_features "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefeatures" connector_update_filtering "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefiltering" connector_update_filtering_validation 
"github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefilteringvalidation" connector_update_index_name "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateindexname" @@ -169,6 +174,9 @@ import ( eql_get_status "github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus" eql_search "github.com/elastic/go-elasticsearch/v8/typedapi/eql/search" esql_async_query "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquery" + esql_async_query_delete "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquerydelete" + esql_async_query_get "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncqueryget" + esql_async_query_stop "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquerystop" esql_query "github.com/elastic/go-elasticsearch/v8/typedapi/esql/query" features_get_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures" features_reset_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures" @@ -190,11 +198,13 @@ import ( ilm_stop "github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop" indices_add_block "github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock" indices_analyze "github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze" + indices_cancel_migrate_reindex "github.com/elastic/go-elasticsearch/v8/typedapi/indices/cancelmigratereindex" indices_clear_cache "github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache" indices_clone "github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone" indices_close "github.com/elastic/go-elasticsearch/v8/typedapi/indices/close" indices_create "github.com/elastic/go-elasticsearch/v8/typedapi/indices/create" indices_create_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream" + indices_create_from "github.com/elastic/go-elasticsearch/v8/typedapi/indices/createfrom" indices_data_streams_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats" 
indices_delete "github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete" indices_delete_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias" @@ -215,12 +225,15 @@ import ( indices_get "github.com/elastic/go-elasticsearch/v8/typedapi/indices/get" indices_get_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias" indices_get_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle" + indices_get_data_lifecycle_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecyclestats" indices_get_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream" indices_get_field_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping" indices_get_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate" indices_get_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping" + indices_get_migrate_reindex_status "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmigratereindexstatus" indices_get_settings "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings" indices_get_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate" + indices_migrate_reindex "github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratereindex" indices_migrate_to_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream" indices_modify_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream" indices_open "github.com/elastic/go-elasticsearch/v8/typedapi/indices/open" @@ -244,20 +257,30 @@ import ( indices_simulate_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate" indices_split "github.com/elastic/go-elasticsearch/v8/typedapi/indices/split" indices_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats" - indices_unfreeze 
"github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze" indices_update_aliases "github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases" indices_validate_query "github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery" + inference_chat_completion_unified "github.com/elastic/go-elasticsearch/v8/typedapi/inference/chatcompletionunified" + inference_completion "github.com/elastic/go-elasticsearch/v8/typedapi/inference/completion" inference_delete "github.com/elastic/go-elasticsearch/v8/typedapi/inference/delete" inference_get "github.com/elastic/go-elasticsearch/v8/typedapi/inference/get" - inference_inference "github.com/elastic/go-elasticsearch/v8/typedapi/inference/inference" inference_put "github.com/elastic/go-elasticsearch/v8/typedapi/inference/put" + inference_put_openai "github.com/elastic/go-elasticsearch/v8/typedapi/inference/putopenai" + inference_put_watsonx "github.com/elastic/go-elasticsearch/v8/typedapi/inference/putwatsonx" + inference_rerank "github.com/elastic/go-elasticsearch/v8/typedapi/inference/rerank" + inference_sparse_embedding "github.com/elastic/go-elasticsearch/v8/typedapi/inference/sparseembedding" + inference_stream_completion "github.com/elastic/go-elasticsearch/v8/typedapi/inference/streamcompletion" + inference_text_embedding "github.com/elastic/go-elasticsearch/v8/typedapi/inference/textembedding" + inference_update "github.com/elastic/go-elasticsearch/v8/typedapi/inference/update" ingest_delete_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletegeoipdatabase" + ingest_delete_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deleteiplocationdatabase" ingest_delete_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline" ingest_geo_ip_stats "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats" ingest_get_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getgeoipdatabase" + 
ingest_get_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getiplocationdatabase" ingest_get_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline" ingest_processor_grok "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok" ingest_put_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putgeoipdatabase" + ingest_put_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putiplocationdatabase" ingest_put_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline" ingest_simulate "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate" license_delete "github.com/elastic/go-elasticsearch/v8/typedapi/license/delete" @@ -383,8 +406,10 @@ import ( search_application_get "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get" search_application_get_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics" search_application_list "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list" + search_application_post_behavioral_analytics_event "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/postbehavioralanalyticsevent" search_application_put "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put" search_application_put_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics" + search_application_render_query "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/renderquery" search_application_search "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search" security_activate_user_profile "github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile" security_authenticate "github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate" @@ -400,6 +425,7 @@ import ( security_create_api_key 
"github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey" security_create_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey" security_create_service_token "github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken" + security_delegate_pki "github.com/elastic/go-elasticsearch/v8/typedapi/security/delegatepki" security_delete_privileges "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges" security_delete_role "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole" security_delete_role_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping" @@ -452,6 +478,7 @@ import ( shutdown_delete_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode" shutdown_get_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode" shutdown_put_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode" + simulate_ingest "github.com/elastic/go-elasticsearch/v8/typedapi/simulate/ingest" slm_delete_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle" slm_execute_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle" slm_execute_retention "github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention" @@ -469,6 +496,7 @@ import ( snapshot_delete_repository "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository" snapshot_get "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get" snapshot_get_repository "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository" + snapshot_repository_analyze "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/repositoryanalyze" snapshot_repository_verify_integrity "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/repositoryverifyintegrity" snapshot_restore "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore" snapshot_status 
"github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status" @@ -532,7 +560,7 @@ type AsyncSearch struct { // specific async search is restricted to: the authenticated user that submitted // the original search request; users that have the `cancel_task` cluster // privilege. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit Delete async_search_delete.NewDelete // Get async search results. // @@ -540,15 +568,18 @@ type AsyncSearch struct { // If the Elasticsearch security features are enabled, access to the results of // a specific async search is restricted to the user or API key that submitted // it. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit Get async_search_get.NewGet // Get the async search status. // // Get the status of a previously submitted async search request given its // identifier, without retrieving search results. - // If the Elasticsearch security features are enabled, use of this API is - // restricted to the `monitoring_user` role. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html + // If the Elasticsearch security features are enabled, the access to the status + // of a specific async search is restricted to: + // + // * The user or API key that submitted the original async search request. + // * Users that have the `monitor` cluster privilege or greater privileges. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit Status async_search_status.NewStatus // Run an async search. // @@ -563,7 +594,7 @@ type AsyncSearch struct { // response larger than 10Mb and an attempt to do this results in an error. 
// The maximum allowed size for a stored async search response can be set by // changing the `search.max_async_search_response_size` cluster level setting. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit Submit async_search_submit.NewSubmit } @@ -573,7 +604,7 @@ type Autoscaling struct { // NOTE: This feature is designed for indirect use by Elasticsearch Service, // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy DeleteAutoscalingPolicy autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicy // Get the autoscaling capacity. // @@ -598,21 +629,21 @@ type Autoscaling struct { // how and why autoscaling determined a certain capacity was required. // This information is provided for diagnosis only. // Do not use this information to make autoscaling decisions. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity GetAutoscalingCapacity autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacity // Get an autoscaling policy. // // NOTE: This feature is designed for indirect use by Elasticsearch Service, // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity GetAutoscalingPolicy autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicy // Create or update an autoscaling policy. // // NOTE: This feature is designed for indirect use by Elasticsearch Service, // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy PutAutoscalingPolicy autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicy } @@ -625,54 +656,63 @@ type Capabilities struct { type Cat struct { // Get aliases. - // Retrieves the cluster’s index aliases, including filter and routing - // information. - // The API does not return data stream aliases. // - // CAT APIs are only intended for human consumption using the command line or - // the Kibana console. They are not intended for use by applications. For - // application consumption, use the aliases API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html + // Get the cluster's index aliases, including filter and routing information. + // This API does not return data stream aliases. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or the Kibana console. They are not intended for use by applications. + // For application consumption, use the aliases API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases Aliases cat_aliases.NewAliases - // Provides a snapshot of the number of shards allocated to each data node and - // their disk space. 
- // IMPORTANT: cat APIs are only intended for human consumption using the command + // Get shard allocation information. + // + // Get a snapshot of the number of shards allocated to each data node and their + // disk space. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation Allocation cat_allocation.NewAllocation // Get component templates. - // Returns information about component templates in a cluster. + // + // Get information about component templates in a cluster. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // - // CAT APIs are only intended for human consumption using the command line or - // Kibana console. + // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-component-templates.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates ComponentTemplates cat_component_templates.NewComponentTemplates // Get a document count. - // Provides quick access to a document count for a data stream, an index, or an + // + // Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // - // CAT APIs are only intended for human consumption using the command line or - // Kibana console. 
+ // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count Count cat_count.NewCount - // Returns the amount of heap memory currently used by the field data cache on - // every data node in the cluster. + // Get field data cache information. + // + // Get the amount of heap memory currently used by the field data cache on every + // data node in the cluster. + // // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the nodes stats API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata Fielddata cat_fielddata.NewFielddata - // Returns the health status of a cluster, similar to the cluster health API. - // IMPORTANT: cat APIs are only intended for human consumption using the command + // Get the cluster health status. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -686,14 +726,16 @@ type Cat struct { // nodes. // You also can use the API to track the recovery of a large cluster over a // longer period of time. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health Health cat_health.NewHealth // Get CAT help. - // Returns help for the CAT APIs. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html + // + // Get help for the CAT APIs. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat Help cat_help.NewHelp // Get index information. - // Returns high-level information about indices in a cluster, including backing + // + // Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -714,81 +756,98 @@ type Cat struct { // Kibana console. // They are not intended for use by applications. For application consumption, // use an index endpoint. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices Indices cat_indices.NewIndices - // Returns information about the master node, including the ID, bound IP - // address, and name. + // Get master node information. + // + // Get information about the master node, including the ID, bound IP address, + // and name. + // // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master Master cat_master.NewMaster // Get data frame analytics jobs. - // Returns configuration and usage information about data frame analytics jobs. // - // CAT APIs are only intended for human consumption using the Kibana + // Get configuration and usage information about data frame analytics jobs. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. 
For // application consumption, use the get data frame analytics jobs statistics // API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics MlDataFrameAnalytics cat_ml_data_frame_analytics.NewMlDataFrameAnalytics // Get datafeeds. - // Returns configuration and usage information about datafeeds. + // + // Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // - // CAT APIs are only intended for human consumption using the Kibana + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds MlDatafeeds cat_ml_datafeeds.NewMlDatafeeds // Get anomaly detection jobs. - // Returns configuration and usage information for anomaly detection jobs. + // + // Get configuration and usage information for anomaly detection jobs. // This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // - // CAT APIs are only intended for human consumption using the Kibana + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs MlJobs cat_ml_jobs.NewMlJobs // Get trained models. - // Returns configuration and usage information about inference trained models. // - // CAT APIs are only intended for human consumption using the Kibana + // Get configuration and usage information about inference trained models. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get trained models statistics API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models MlTrainedModels cat_ml_trained_models.NewMlTrainedModels - // Returns information about custom node attributes. + // Get node attribute information. + // + // Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs Nodeattrs cat_nodeattrs.NewNodeattrs - // Returns information about the nodes in a cluster. + // Get node information. + // + // Get information about the nodes in a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes Nodes cat_nodes.NewNodes - // Returns cluster-level changes that have not yet been executed. + // Get pending task information. + // + // Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks PendingTasks cat_pending_tasks.NewPendingTasks - // Returns a list of plugins running on each node of a cluster. + // Get plugin information. + // + // Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins Plugins cat_plugins.NewPlugins - // Returns information about ongoing and completed shard recoveries. + // Get shard recovery information. + // + // Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. When a shard recovery completes, the recovered shard is available for @@ -798,173 +857,330 @@ type Cat struct { // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. 
They are not intended for use by applications. For // application consumption, use the index recovery API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery Recovery cat_recovery.NewRecovery - // Returns the snapshot repositories for a cluster. + // Get snapshot repository information. + // + // Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot repository API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories Repositories cat_repositories.NewRepositories - // Returns low-level information about the Lucene segments in index shards. + // Get segment information. + // + // Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the index segments API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments Segments cat_segments.NewSegments - // Returns information about the shards in a cluster. + // Get shard information. + // + // Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards Shards cat_shards.NewShards - // Returns information about the snapshots stored in one or more repositories. + // Get snapshot information. + // + // Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots Snapshots cat_snapshots.NewSnapshots - // Returns information about tasks currently executing in the cluster. + // Get task information. + // + // Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks Tasks cat_tasks.NewTasks - // Returns information about index templates in a cluster. + // Get index template information. + // + // Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get index template API. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates Templates cat_templates.NewTemplates - // Returns thread pool statistics for each node in a cluster. + // Get thread pool statistics. + // + // Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool ThreadPool cat_thread_pool.NewThreadPool - // Get transforms. - // Returns configuration and usage information about transforms. + // Get transform information. + // + // Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get transform statistics API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms Transforms cat_transforms.NewTransforms } type Ccr struct { - // Deletes auto-follow patterns. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html + // Delete auto-follow patterns. + // + // Delete a collection of cross-cluster replication auto-follow patterns. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern DeleteAutoFollowPattern ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPattern - // Creates a new follower index configured to follow the referenced leader - // index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html + // Create a follower. + // Create a cross-cluster replication follower index that follows a specific + // leader index. + // When the API returns, the follower index exists and cross-cluster replication + // starts replicating operations from the leader index to the follower index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow Follow ccr_follow.NewFollow - // Retrieves information about all follower indices, including parameters and - // status for each follower index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html + // Get follower information. + // + // Get information about all cross-cluster replication follower indices. + // For example, the results include follower index names, leader index names, + // replication options, and whether the follower indices are active or paused. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info FollowInfo ccr_follow_info.NewFollowInfo - // Retrieves follower stats. return shard-level stats about the following tasks - // associated with each shard for the specified indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html + // Get follower stats. + // + // Get cross-cluster replication follower stats. + // The API returns shard-level stats about the "following tasks" associated with + // each shard for the specified indices. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats FollowStats ccr_follow_stats.NewFollowStats - // Removes the follower retention leases from the leader. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html + // Forget a follower. + // Remove the cross-cluster replication follower retention leases from the + // leader. + // + // A following index takes out retention leases on its leader index. + // These leases are used to increase the likelihood that the shards of the + // leader index retain the history of operations that the shards of the + // following index need to run replication. + // When a follower index is converted to a regular index by the unfollow API + // (either by directly calling the API or by index lifecycle management tasks), + // these leases are removed. + // However, removal of the leases can fail, for example when the remote cluster + // containing the leader index is unavailable. + // While the leases will eventually expire on their own, their extended + // existence can cause the leader index to hold more history than necessary and + // prevent index lifecycle management from performing some operations on the + // leader index. + // This API exists to enable manually removing the leases when the unfollow API + // is unable to do so. + // + // NOTE: This API does not stop replication by a following index. If you use + // this API with a follower index that is still actively following, the + // following index will add back retention leases on the leader. + // The only purpose of this API is to handle the case of failure to remove the + // following retention leases after the unfollow API is invoked. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower ForgetFollower ccr_forget_follower.NewForgetFollower - // Gets configured auto-follow patterns. Returns the specified auto-follow - // pattern collection. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html + // Get auto-follow patterns. + // + // Get cross-cluster replication auto-follow patterns. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 GetAutoFollowPattern ccr_get_auto_follow_pattern.NewGetAutoFollowPattern - // Pauses an auto-follow pattern - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html + // Pause an auto-follow pattern. + // + // Pause a cross-cluster replication auto-follow pattern. + // When the API returns, the auto-follow pattern is inactive. + // New indices that are created on the remote cluster and match the auto-follow + // patterns are ignored. + // + // You can resume auto-following with the resume auto-follow pattern API. + // When it resumes, the auto-follow pattern is active again and automatically + // configures follower indices for newly created indices on the remote cluster + // that match its patterns. + // Remote indices that were created while the pattern was paused will also be + // followed, unless they have been deleted or closed in the interim. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern PauseAutoFollowPattern ccr_pause_auto_follow_pattern.NewPauseAutoFollowPattern - // Pauses a follower index. The follower index will not fetch any additional - // operations from the leader index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html + // Pause a follower. + // + // Pause a cross-cluster replication follower index. + // The follower index will not fetch any additional operations from the leader + // index. + // You can resume following with the resume follower API. + // You can pause and resume a follower index to change the configuration of the + // following task. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow PauseFollow ccr_pause_follow.NewPauseFollow - // Creates a new named collection of auto-follow patterns against a specified - // remote cluster. Newly created indices on the remote cluster matching any of - // the specified patterns will be automatically configured as follower indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html + // Create or update auto-follow patterns. + // Create a collection of cross-cluster replication auto-follow patterns for a + // remote cluster. + // Newly created indices on the remote cluster that match any of the patterns + // are automatically configured as follower indices. + // Indices on the remote cluster that were created before the auto-follow + // pattern was created will not be auto-followed even if they match the pattern. + // + // This API can also be used to update auto-follow patterns. + // NOTE: Follower indices that were configured automatically before updating an + // auto-follow pattern will remain unchanged even if they do not match against + // the new patterns. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern PutAutoFollowPattern ccr_put_auto_follow_pattern.NewPutAutoFollowPattern - // Resumes an auto-follow pattern that has been paused - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html + // Resume an auto-follow pattern. + // + // Resume a cross-cluster replication auto-follow pattern that was paused. + // The auto-follow pattern will resume configuring following indices for newly + // created indices that match its patterns on the remote cluster. + // Remote indices created while the pattern was paused will also be followed + // unless they have been deleted or closed in the interim. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern ResumeAutoFollowPattern ccr_resume_auto_follow_pattern.NewResumeAutoFollowPattern - // Resumes a follower index that has been paused - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html + // Resume a follower. + // Resume a cross-cluster replication follower index that was paused. + // The follower index could have been paused with the pause follower API. + // Alternatively it could be paused due to replication that cannot be retried + // due to failures during following tasks. + // When this API returns, the follower index will resume fetching operations + // from the leader index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow ResumeFollow ccr_resume_follow.NewResumeFollow - // Gets all stats related to cross-cluster replication. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html + // Get cross-cluster replication stats. + // + // This API returns stats about auto-following and the same shard-level stats as + // the get follower stats API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats Stats ccr_stats.NewStats - // Stops the following task associated with a follower index and removes index - // metadata and settings associated with cross-cluster replication. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html + // Unfollow an index. + // + // Convert a cross-cluster replication follower index to a regular index. + // The API stops the following task associated with a follower index and removes + // index metadata and settings associated with cross-cluster replication. + // The follower index must be paused and closed before you call the unfollow + // API. 
+ // + // > info + // > Currently cross-cluster replication does not support converting an existing + // regular index to a follower index. Converting a follower index to a regular + // index is an irreversible operation. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow Unfollow ccr_unfollow.NewUnfollow } type Cluster struct { - // Provides explanations for shard allocations in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html + // Explain the shard allocations. + // Get explanations for shard allocations in the cluster. + // For unassigned shards, it provides an explanation for why the shard is + // unassigned. + // For assigned shards, it provides an explanation for why the shard is + // remaining on its current node and has not moved or rebalanced to another + // node. + // This API can be very useful when attempting to diagnose why a shard is + // unassigned or why a shard continues to remain on its current node when you + // might expect otherwise. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain AllocationExplain cluster_allocation_explain.NewAllocationExplain // Delete component templates. - // Deletes component templates. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template DeleteComponentTemplate cluster_delete_component_template.NewDeleteComponentTemplate - // Clears cluster voting config exclusions. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html + // Clear cluster voting config exclusions. + // Remove master-eligible nodes from the voting configuration exclusion list. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions DeleteVotingConfigExclusions cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusions // Check component templates. // Returns information about whether a particular component template exists. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template ExistsComponentTemplate cluster_exists_component_template.NewExistsComponentTemplate // Get component templates. - // Retrieves information about component templates. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html + // Get information about component templates. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template GetComponentTemplate cluster_get_component_template.NewGetComponentTemplate - // Returns cluster-wide settings. + // Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings GetSettings cluster_get_settings.NewGetSettings - // The cluster health API returns a simple status on the health of the cluster. + // Get the cluster health status. + // // You can also use the API to get the health status of only specified data - // streams and indices. For data streams, the API retrieves the health status of - // the stream’s backing indices. - // The cluster health status is: green, yellow or red. 
On the shard level, a red - // status indicates that the specific shard is not allocated in the cluster, - // yellow means that the primary shard is allocated but replicas are not, and - // green means that all shards are allocated. The index level status is - // controlled by the worst shard status. The cluster status is controlled by the - // worst index status. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html + // streams and indices. + // For data streams, the API retrieves the health status of the stream’s backing + // indices. + // + // The cluster health status is: green, yellow or red. + // On the shard level, a red status indicates that the specific shard is not + // allocated in the cluster. Yellow means that the primary shard is allocated + // but replicas are not. Green means that all shards are allocated. + // The index level status is controlled by the worst shard status. + // + // One of the main benefits of the API is the ability to wait until the cluster + // reaches a certain high watermark health level. + // The cluster status is controlled by the worst index status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health Health cluster_health.NewHealth // Get cluster info. // Returns basic information about the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-info.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info Info cluster_info.NewInfo - // Returns cluster-level changes (such as create index, update mapping, allocate - // or fail shard) that have not yet been executed. + // Get the pending cluster tasks. + // Get information about cluster-level changes (such as create index, update + // mapping, allocate or fail shard) that have not yet taken effect. + // // NOTE: This API returns a list of any pending updates to the cluster state. 
- // These are distinct from the tasks reported by the Task Management API which + // These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a // cluster state update, the activity of this task might be reported by both // task api and pending cluster tasks API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks PendingTasks cluster_pending_tasks.NewPendingTasks - // Updates the cluster voting config exclusions by node ids or node names. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html + // Update voting configuration exclusions. + // Update the cluster voting config exclusions by node IDs or node names. + // By default, if there are more than three master-eligible nodes in the cluster + // and you remove fewer than half of the master-eligible nodes in the cluster at + // once, the voting configuration automatically shrinks. + // If you want to shrink the voting configuration to contain fewer than three + // nodes or to remove half or more of the master-eligible nodes in the cluster + // at once, use this API to remove departing nodes from the voting configuration + // manually. + // The API adds an entry for each specified node to the cluster’s voting + // configuration exclusions list. + // It then waits until the cluster has reconfigured its voting configuration to + // exclude the specified nodes. + // + // Clusters should have no voting configuration exclusions in normal operation. + // Once the excluded nodes have stopped, clear the voting configuration + // exclusions with `DELETE /_cluster/voting_config_exclusions`. 
+ // This API waits for the nodes to be fully removed from the cluster before it + // returns. + // If your cluster has voting configuration exclusions for nodes that you no + // longer intend to remove, use `DELETE + // /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the + // voting configuration exclusions without waiting for the nodes to leave the + // cluster. + // + // A response to `POST /_cluster/voting_config_exclusions` with an HTTP status + // code of 200 OK guarantees that the node has been removed from the voting + // configuration and will not be reinstated until the voting configuration + // exclusions are cleared by calling `DELETE + // /_cluster/voting_config_exclusions`. + // If the call to `POST /_cluster/voting_config_exclusions` fails or returns a + // response with an HTTP status code other than 200 OK then the node may not + // have been removed from the voting configuration. + // In that case, you may safely retry the call. + // + // NOTE: Voting exclusions are required only when you remove at least half of + // the master-eligible nodes from a cluster in a short time period. + // They are not required when removing master-ineligible nodes or when removing + // fewer than half of the master-eligible nodes. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions PostVotingConfigExclusions cluster_post_voting_config_exclusions.NewPostVotingConfigExclusions // Create or update a component template. - // Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -987,27 +1203,138 @@ type Cluster struct { // You can use C-style `/* *\/` block comments in component templates. // You can include comments anywhere in the request body except before the // opening curly bracket. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html + // + // **Applying component templates** + // + // You cannot directly apply a component template to a data stream or index. + // To be applied, a component template must be included in an index template's + // `composed_of` list. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template PutComponentTemplate cluster_put_component_template.NewPutComponentTemplate - // Updates the cluster settings. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html + // Update the cluster settings. + // + // Configure and update dynamic settings on a running cluster. + // You can also configure dynamic settings locally on an unstarted or shut down + // node in `elasticsearch.yml`. + // + // Updates made with this API can be persistent, which apply across cluster + // restarts, or transient, which reset after a cluster restart. + // You can also reset transient or persistent settings by assigning them a null + // value. + // + // If you configure the same setting using multiple methods, Elasticsearch + // applies the settings in following order of precedence: 1) Transient setting; + // 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting + // value. + // For example, you can apply a transient setting to override a persistent + // setting or `elasticsearch.yml` setting. + // However, a change to an `elasticsearch.yml` setting will not override a + // defined transient or persistent setting. + // + // TIP: In Elastic Cloud, use the user settings feature to configure all cluster + // settings. This method automatically rejects unsafe settings that could break + // your cluster. + // If you run Elasticsearch on your own hardware, use this API to configure + // dynamic cluster settings. + // Only use `elasticsearch.yml` for static cluster settings and node settings. 
+ // The API doesn’t require a restart and ensures a setting’s value is the same + // on all nodes. + // + // WARNING: Transient cluster settings are no longer recommended. Use persistent + // cluster settings instead. + // If a cluster becomes unstable, transient settings can clear unexpectedly, + // resulting in a potentially undesired cluster configuration. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings PutSettings cluster_put_settings.NewPutSettings - // The cluster remote info API allows you to retrieve all of the configured - // remote cluster information. It returns connection and endpoint information - // keyed by the configured remote cluster alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html + // Get remote cluster information. + // + // Get information about configured remote clusters. + // The API returns connection and endpoint information keyed by the configured + // remote cluster alias. + // + // > info + // > This API returns information that reflects current state on the local + // cluster. + // > The `connected` field does not necessarily reflect whether a remote cluster + // is down or unavailable, only whether there is currently an open connection to + // it. + // > Elasticsearch does not spontaneously try to reconnect to a disconnected + // remote cluster. + // > To trigger a reconnection, attempt a cross-cluster search, ES|QL + // cross-cluster search, or try the [resolve cluster + // endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info RemoteInfo cluster_remote_info.NewRemoteInfo - // Allows to manually change the allocation of individual shards in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html + // Reroute the cluster. 
+ // Manually change the allocation of individual shards in the cluster. + // For example, a shard can be moved from one node to another explicitly, an + // allocation can be canceled, and an unassigned shard can be explicitly + // allocated to a specific node. + // + // It is important to note that after processing any reroute commands + // Elasticsearch will perform rebalancing as normal (respecting the values of + // settings such as `cluster.routing.rebalance.enable`) in order to remain in a + // balanced state. + // For example, if the requested allocation includes moving a shard from node1 + // to node2 then this may cause a shard to be moved from node2 back to node1 to + // even things out. + // + // The cluster can be set to disable allocations using the + // `cluster.routing.allocation.enable` setting. + // If allocations are disabled then the only allocations that will be performed + // are explicit ones given using the reroute command, and consequent allocations + // due to rebalancing. + // + // The cluster will attempt to allocate a shard a maximum of + // `index.allocation.max_retries` times in a row (defaults to `5`), before + // giving up and leaving the shard unallocated. + // This scenario can be caused by structural problems such as having an analyzer + // which refers to a stopwords file which doesn’t exist on all nodes. + // + // Once the problem has been corrected, allocation can be manually retried by + // calling the reroute API with the `?retry_failed` URI query parameter, which + // will attempt a single retry round for these shards. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute Reroute cluster_reroute.NewReroute - // Returns a comprehensive information about the state of the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html + // Get the cluster state. + // Get comprehensive information about the state of the cluster. 
+ // + // The cluster state is an internal data structure which keeps track of a + // variety of information needed by every node, including the identity and + // attributes of the other nodes in the cluster; cluster-wide settings; index + // metadata, including the mapping and settings for each index; the location and + // status of every shard copy in the cluster. + // + // The elected master node ensures that every node in the cluster has a copy of + // the same cluster state. + // This API lets you retrieve a representation of this internal state for + // debugging or diagnostic purposes. + // You may need to consult the Elasticsearch source code to determine the + // precise meaning of the response. + // + // By default the API will route requests to the elected master node since this + // node is the authoritative source of cluster states. + // You can also retrieve the cluster state held on the node handling the API + // request by adding the `?local=true` query parameter. + // + // Elasticsearch may need to expend significant effort to compute a response to + // this API in larger clusters, and the response may comprise a very large + // quantity of data. + // If you use this API repeatedly, your cluster may become unstable. + // + // WARNING: The response is a representation of an internal data structure. + // Its format is not subject to the same compatibility guarantees as other more + // stable APIs and may change from version to version. + // Do not query this API using external monitoring tools. + // Instead, obtain the information you require using other more stable cluster + // APIs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state State cluster_state.NewState - // Returns cluster statistics. - // It returns basic index metrics (shard numbers, store size, memory usage) and + // Get cluster statistics. 
+ // Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats Stats cluster_stats.NewStats } @@ -1016,7 +1343,7 @@ type Connector struct { // // Update the `last_seen` field in the connector and set it to the current // timestamp. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in CheckIn connector_check_in.NewCheckIn // Delete a connector. // @@ -1025,23 +1352,23 @@ type Connector struct { // NOTE: This action doesn’t delete any API keys, ingest pipelines, or data // indices associated with the connector. // These need to be removed manually. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete Delete connector_delete.NewDelete // Get a connector. // // Get the details about a connector. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get Get connector_get.NewGet // Update the connector last sync stats. // // Update the fields related to the last sync of a connector. // This action is used for analytics and monitoring. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-last-sync-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync LastSync connector_last_sync.NewLastSync // Get all connectors. // // Get information about all connectors. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list List connector_list.NewList // Create a connector. // @@ -1052,10 +1379,10 @@ type Connector struct { // Elastic Cloud. // Self-managed connectors (Connector clients) are self-managed on your // infrastructure. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put Post connector_post.NewPost // Create or update a connector. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put Put connector_put.NewPut // Creates a secret for a Connector. // @@ -1066,33 +1393,81 @@ type Connector struct { // `cancellation_requested_at` to the current time. // The connector service is then responsible for setting the status of connector // sync jobs to cancelled. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cancel-connector-sync-job-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel SyncJobCancel connector_sync_job_cancel.NewSyncJobCancel + // Check in a connector sync job. + // Check in a connector sync job and set the `last_seen` field to the current + // time before updating it in the internal index. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in + SyncJobCheckIn connector_sync_job_check_in.NewSyncJobCheckIn + // Claim a connector sync job. 
+ // This action updates the job status to `in_progress` and sets the `last_seen` + // and `started_at` timestamps to the current time. + // Additionally, it can set the `sync_cursor` property for the sync job. + // + // This API is not intended for direct connector management by users. + // It supports the implementation of services that utilize the connector + // protocol to communicate with Elasticsearch. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim + SyncJobClaim connector_sync_job_claim.NewSyncJobClaim // Delete a connector sync job. // // Remove a connector sync job and its associated data. // This is a destructive action that is not recoverable. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-sync-job-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete SyncJobDelete connector_sync_job_delete.NewSyncJobDelete + // Set a connector sync job error. + // Set the `error` field for a connector sync job and set its `status` to + // `error`. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error + SyncJobError connector_sync_job_error.NewSyncJobError // Get a connector sync job. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-sync-job-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get SyncJobGet connector_sync_job_get.NewSyncJobGet // Get all connector sync jobs. // // Get information about all stored connector sync jobs listed by their creation // date in ascending order. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-sync-jobs-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list SyncJobList connector_sync_job_list.NewSyncJobList // Create a connector sync job. // // Create a connector sync job document in the internal index and initialize its // counters and timestamps with default values. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-sync-job-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post SyncJobPost connector_sync_job_post.NewSyncJobPost + // Set the connector sync job stats. + // Stats include: `deleted_document_count`, `indexed_document_count`, + // `indexed_document_volume`, and `total_document_count`. + // You can also update `last_seen`. + // This API is mainly used by the connector service for updating sync job + // information. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats + SyncJobUpdateStats connector_sync_job_update_stats.NewSyncJobUpdateStats // Activate the connector draft filter. // // Activates the valid draft filtering for a connector. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering UpdateActiveFiltering connector_update_active_filtering.NewUpdateActiveFiltering // Update the connector API key ID. // @@ -1102,12 +1477,12 @@ type Connector struct { // The connector secret ID is required only for Elastic managed (native) // connectors. // Self-managed connectors (connector clients) do not use this field. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-api-key-id-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id UpdateApiKeyId connector_update_api_key_id.NewUpdateApiKeyId // Update the connector configuration. // // Update the configuration field in the connector document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-configuration-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration UpdateConfiguration connector_update_configuration.NewUpdateConfiguration // Update the connector error field. // @@ -1116,8 +1491,26 @@ type Connector struct { // is updated to error. // Otherwise, if the error is reset to null, the connector status is updated to // connected. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-error-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error UpdateError connector_update_error.NewUpdateError + // Update the connector features. + // Update the connector features in the connector document. 
+ // This API can be used to control the following aspects of a connector: + // + // * document-level security + // * incremental syncs + // * advanced sync rules + // * basic sync rules + // + // Normally, the running connector service automatically manages these features. + // However, you can use this API to override the default behavior. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features + UpdateFeatures connector_update_features.NewUpdateFeatures // Update the connector filtering. // // Update the draft filtering configuration of a connector and marks the draft @@ -1126,7 +1519,7 @@ type Connector struct { // connector service. // The filtering property is used to configure sync rules (both basic and // advanced) for a connector. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering UpdateFiltering connector_update_filtering.NewUpdateFiltering // Update the connector draft filtering validation. // @@ -1137,10 +1530,10 @@ type Connector struct { // // Update the `index_name` field of a connector, specifying the index where the // data ingested by the connector is stored. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-index-name-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name UpdateIndexName connector_update_index_name.NewUpdateIndexName // Update the connector name and description. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-name-description-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name UpdateName connector_update_name.NewUpdateName // Update the connector is_native flag. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html @@ -1149,32 +1542,184 @@ type Connector struct { // // When you create a new connector, the configuration of an ingest pipeline is // populated with default settings. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-pipeline-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline UpdatePipeline connector_update_pipeline.NewUpdatePipeline // Update the connector scheduling. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-scheduling-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling UpdateScheduling connector_update_scheduling.NewUpdateScheduling // Update the connector service type. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-service-type-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type UpdateServiceType connector_update_service_type.NewUpdateServiceType // Update the connector status. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-status-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status UpdateStatus connector_update_status.NewUpdateStatus } type Core struct { // Bulk index or delete documents. - // Performs multiple indexing or delete operations in a single API call. + // Perform multiple `index`, `create`, `delete`, and `update` actions in a + // single request. 
// This reduces overhead and can greatly increase indexing speed. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To use the `create` action, you must have the `create_doc`, `create`, + // `index`, or `write` index privilege. Data streams support only the `create` + // action. + // * To use the `index` action, you must have the `create`, `index`, or `write` + // index privilege. + // * To use the `delete` action, you must have the `delete` or `write` index + // privilege. + // * To use the `update` action, you must have the `index` or `write` index + // privilege. + // * To automatically create a data stream or index with a bulk API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // * To make the result of a bulk operation visible to search using the + // `refresh` parameter, you must have the `maintenance` or `manage` index + // privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The actions are specified in the request body using a newline delimited JSON + // (NDJSON) structure: + // + // ``` + // action_and_meta_data\n + // optional_source\n + // action_and_meta_data\n + // optional_source\n + // .... + // action_and_meta_data\n + // optional_source\n + // ``` + // + // The `index` and `create` actions expect a source on the next line and have + // the same semantics as the `op_type` parameter in the standard index API. + // A `create` action fails if a document with the same ID already exists in the + // target + // An `index` action adds or replaces a document as necessary. + // + // NOTE: Data streams support only the `create` action. 
+ // To update or delete a document in a data stream, you must target the backing + // index containing the document. + // + // An `update` action expects that the partial doc, upsert, and script and its + // options are specified on the next line. + // + // A `delete` action does not expect a source on the next line and has the same + // semantics as the standard delete API. + // + // NOTE: The final line of data must end with a newline character (`\n`). + // Each newline character may be preceded by a carriage return (`\r`). + // When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header + // of `application/json` or `application/x-ndjson`. + // Because this format uses literal newline characters (`\n`) as delimiters, + // make sure that the JSON actions and sources are not pretty printed. + // + // If you provide a target in the request path, it is used for any actions that + // don't explicitly specify an `_index` argument. + // + // A note on the format: the idea here is to make processing as fast as + // possible. + // As some of the actions are redirected to other shards on other nodes, only + // `action_meta_data` is parsed on the receiving node side. + // + // Client libraries using this protocol should try and strive to do something + // similar on the client side, and reduce buffering as much as possible. + // + // There is no "correct" number of actions to perform in a single bulk request. + // Experiment with different settings to find the optimal size for your + // particular workload. + // Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by + // default so clients must ensure that no request exceeds this size. + // It is not possible to index a single document that exceeds the size limit, so + // you must pre-process any such documents into smaller pieces before sending + // them to Elasticsearch. 
+ // For instance, split documents into pages or chapters before indexing them, or + // store raw binary data in a system outside Elasticsearch and replace the raw + // data with a link to the external system in the documents that you send to + // Elasticsearch. + // + // **Client suppport for bulk requests** + // + // Some of the officially supported clients provide helpers to assist with bulk + // requests and reindexing: + // + // * Go: Check out `esutil.BulkIndexer` + // * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and + // `Search::Elasticsearch::Client::5_0::Scroll` + // * Python: Check out `elasticsearch.helpers.*` + // * JavaScript: Check out `client.helpers.*` + // * .NET: Check out `BulkAllObservable` + // * PHP: Check out bulk indexing. + // + // **Submitting bulk requests with cURL** + // + // If you're providing text file input to `curl`, you must use the + // `--data-binary` flag instead of plain `-d`. + // The latter doesn't preserve newlines. For example: + // + // ``` + // $ cat requests + // { "index" : { "_index" : "test", "_id" : "1" } } + // { "field1" : "value1" } + // $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk + // --data-binary "@requests"; echo + // {"took":7, "errors": false, + // "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} + // ``` + // + // **Optimistic concurrency control** + // + // Each `index` and `delete` action within a bulk API call may include the + // `if_seq_no` and `if_primary_term` parameters in their respective action and + // meta data lines. + // The `if_seq_no` and `if_primary_term` parameters control how operations are + // run, based on the last modification to existing documents. See Optimistic + // concurrency control for more details. + // + // **Versioning** + // + // Each bulk item can include the version value using the `version` field. 
+ // It automatically follows the behavior of the index or delete operation based + // on the `_version` mapping. + // It also support the `version_type`. + // + // **Routing** + // + // Each bulk item can include the routing value using the `routing` field. + // It automatically follows the behavior of the index or delete operation based + // on the `_routing` mapping. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Wait for active shards** + // + // When making bulk calls, you can set the `wait_for_active_shards` parameter to + // require a minimum number of shard copies to be active before starting to + // process the bulk request. + // + // **Refresh** + // + // Control when the changes made by this request are visible to search. + // + // NOTE: Only the shards that receive the bulk request will be affected by + // refresh. + // Imagine a `_bulk?refresh=wait_for` request with three documents in it that + // happen to be routed to different shards in an index with five shards. + // The request will only wait for those three shards to refresh. + // The other two shards that make up the index do not participate in the `_bulk` + // request at all. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk Bulk core_bulk.NewBulk // Clear a scrolling search. - // // Clear the search context and results for a scrolling search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll ClearScroll core_clear_scroll.NewClearScroll // Close a point in time. - // // A point in time must be opened explicitly before being used in search // requests. // The `keep_alive` parameter tells Elasticsearch how long it should persist. @@ -1182,25 +1727,336 @@ type Core struct { // elapsed. 
// However, keeping points in time has a cost; close them as soon as they are no // longer required for search requests. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time ClosePointInTime core_close_point_in_time.NewClosePointInTime - // Returns number of documents matching a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html + // Count search results. + // Get the number of documents matching a query. + // + // The query can be provided either by using a simple query string as a + // parameter, or by defining Query DSL within the request body. + // The query is optional. When no query is provided, the API uses `match_all` to + // count all the documents. + // + // The count API supports multi-target syntax. You can run a single count API + // search across multiple data streams and indices. + // + // The operation is broadcast across all shards. + // For each shard ID group, a replica is chosen and the search is run against + // it. + // This means that replicas increase the scalability of the count. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count Count core_count.NewCount - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it - // searchable. - // If the target is an index and the document already exists, the request - // updates the document and increments its version. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html + // Create a new document in the index. + // + // You can index a new JSON document with the `//_doc/` or + // `//_create/<_id>` APIs + // Using `_create` guarantees that the document is indexed only if it does not + // already exist. + // It returns a 409 response when a document with a same ID already exists in + // the index. 
+ // To update an existing document, you must use the `//_doc/` API. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To add a document using the `PUT //_create/<_id>` or `POST + // //_create/<_id>` request formats, you must have the `create_doc`, + // `create`, `index`, or `write` index privilege. + // * To automatically create a data stream or index with this API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. + // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. + // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. 
+ // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). 
+ // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards + // request` parameter. + // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. 
+ // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create Create core_create.NewCreate // Delete a document. - // Removes a JSON document from the specified index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html + // + // Remove a JSON document from the specified index. + // + // NOTE: You cannot send deletion requests directly to a data stream. + // To delete a document in a data stream, you must target the backing index + // containing the document. + // + // **Optimistic concurrency control** + // + // Delete operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Versioning** + // + // Each document indexed is versioned. + // When deleting a document, the version can be specified to make sure the + // relevant document you are trying to delete is actually being deleted and it + // has not changed in the meantime. + // Every write operation run on a document, deletes included, causes its version + // to be incremented. + // The version number of a deleted document remains available for a short time + // after deletion to allow for control of concurrent operations. + // The length of time for which a deleted document's version remains available + // is determined by the `index.gc_deletes` index setting. 
+ // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to delete a document. + // + // If the `_routing` mapping is set to `required` and no routing value is + // specified, the delete API throws a `RoutingMissingException` and rejects the + // request. + // + // For example: + // + // ``` + // DELETE /my-index-000001/_doc/1?routing=shard-1 + // ``` + // + // This request deletes the document with ID 1, but it is routed based on the + // user. + // The document is not deleted if the correct routing is not specified. + // + // **Distributed** + // + // The delete operation gets hashed into a specific shard ID. + // It then gets redirected into the primary shard within that ID group and + // replicated (if needed) to shard replicas within that ID group. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete Delete core_delete.NewDelete // Delete documents. + // // Deletes documents that match the specified query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `delete` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // When you submit a delete by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and deletes + // matching documents using internal versioning. + // If a document changes between the time that the snapshot is taken and the + // delete operation is processed, it results in a version conflict and the + // delete operation fails. 
+ // + // NOTE: Documents with a version equal to 0 cannot be deleted using delete by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing a delete by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents to delete. + // A bulk delete request is performed for each batch of matching documents. + // If a search or bulk request is rejected, the requests are retried up to 10 + // times, with exponential back off. + // If the maximum retry limit is reached, processing halts and all failed + // requests are returned in the response. + // Any delete requests that completed successfully still stick, they are not + // rolled back. + // + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts the operation could attempt + // to delete more documents from the source than `max_docs` until it has + // successfully deleted `max_docs documents`, or it has gone through every + // document in the source query. + // + // **Throttling delete requests** + // + // To control the rate at which delete by query issues batches of delete + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to disable throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. 
+ // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single `_bulk` request, large batch sizes + // cause Elasticsearch to create many requests and wait before starting the next + // set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Delete by query supports sliced scroll to parallelize the delete process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` lets Elasticsearch choose the number of slices to + // use. + // This setting will use one slice per shard, up to a certain limit. + // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // Adding slices to the delete by query operation creates sub-requests which + // means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with slices only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices` each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. 
+ // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the earlier point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being deleted. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many `slices` hurts + // performance. Setting `slices` higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Delete performance scales linearly across available resources with the + // number of slices. + // + // Whether query or delete performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Cancel a delete by query operation** + // + // Any delete by query can be canceled using the task cancel API. For example: + // + // ``` + // POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel + // ``` + // + // The task ID can be found by using the get tasks API. + // + // Cancellation should happen quickly but might take a few seconds. + // The get task status API will continue to list the delete by query task until + // this task checks that it has been cancelled and terminates itself. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query DeleteByQuery core_delete_by_query.NewDeleteByQuery // Throttle a delete by query operation. 
// @@ -1209,24 +2065,54 @@ type Core struct { // Rethrottling that speeds up the query takes effect immediately but // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Delete a script or search template. // Deletes a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script DeleteScript core_delete_script.NewDeleteScript // Check a document. - // Checks if a specified document exists. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Verify that a document exists. + // For example, check to see if a document with the `_id` 0 exists: + // + // ``` + // HEAD my-index-000001/_doc/0 + // ``` + // + // If the document exists, the API returns a status code of `200 - OK`. + // If the document doesn’t exist, the API returns `404 - Not Found`. + // + // **Versioning support** + // + // You can use the `version` parameter to check the document only if its current + // version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get Exists core_exists.NewExists // Check for a document source. - // Checks if a document's `_source` is stored. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Check whether a document source exists in an index. + // For example: + // + // ``` + // HEAD my-index-000001/_source/1 + // ``` + // + // A document's source is not available if it is disabled in the mapping. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get ExistsSource core_exists_source.NewExistsSource // Explain a document match result. - // Returns information about why a specific document matches, or doesn’t match, - // a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html + // Get information about why a specific document matches, or doesn't match, a + // query. + // It computes a score explanation for a query and a specific document. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain Explain core_explain.NewExplain // Get the field capabilities. // @@ -1237,43 +2123,351 @@ type Core struct { // It returns runtime fields like any other field. // For example, a runtime field with a type of keyword is returned the same as // any other field that belongs to the `keyword` family. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps FieldCaps core_field_caps.NewFieldCaps // Get a document by its ID. - // Retrieves the document with the specified ID from an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Get a document and its source or stored fields from an index. + // + // By default, this API is realtime and is not affected by the refresh rate of + // the index (when data will become visible for search). 
+ // In the case where stored fields are requested with the `stored_fields` + // parameter and the document has been updated but is not yet refreshed, the API + // will have to parse and analyze the source to extract the stored fields. + // To turn off realtime behavior, set the `realtime` parameter to false. + // + // **Source filtering** + // + // By default, the API returns the contents of the `_source` field unless you + // have used the `stored_fields` parameter or the `_source` field is turned off. + // You can turn off `_source` retrieval by using the `_source` parameter: + // + // ``` + // GET my-index-000001/_doc/0?_source=false + // ``` + // + // If you only need one or two fields from the `_source`, use the + // `_source_includes` or `_source_excludes` parameters to include or filter out + // particular fields. + // This can be helpful with large documents where partial retrieval can save on + // network overhead + // Both parameters take a comma separated list of fields or wildcard + // expressions. + // For example: + // + // ``` + // GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + // ``` + // + // If you only want to specify includes, you can use a shorter notation: + // + // ``` + // GET my-index-000001/_doc/0?_source=*.id + // ``` + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to retrieve a document. + // For example: + // + // ``` + // GET my-index-000001/_doc/2?routing=user1 + // ``` + // + // This request gets the document with ID 2, but it is routed based on the user. + // The document is not fetched if the correct routing is not specified. + // + // **Distributed** + // + // The GET operation is hashed into a specific shard ID. + // It is then redirected to one of the replicas within that shard ID and returns + // the result. + // The replicas are the primary shard and its replicas within that shard ID + // group. 
+ // This means that the more replicas you have, the better your GET scaling will + // be. + // + // **Versioning support** + // + // You can use the `version` parameter to retrieve the document only if its + // current version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get Get core_get.NewGet // Get a script or search template. // Retrieves a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script GetScript core_get_script.NewGetScript // Get script contexts. // // Get a list of supported script contexts and their methods. - // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context GetScriptContext core_get_script_context.NewGetScriptContext // Get script languages. // // Get a list of available script types, languages, and contexts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Get a document's source. - // Returns the source of a document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Get the source of a document. 
+ // For example: + // + // ``` + // GET my-index-000001/_source/1 + // ``` + // + // You can use the source filtering parameters to control which parts of the + // `_source` are returned: + // + // ``` + // GET + // my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get GetSource core_get_source.NewGetSource - // Returns the health of the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html + // Get the cluster health. + // Get a report with the health status of an Elasticsearch cluster. + // The report contains a list of indicators that compose Elasticsearch + // functionality. + // + // Each indicator has a health status of: green, unknown, yellow or red. + // The indicator will provide an explanation and metadata describing the reason + // for its current health status. + // + // The cluster’s status is controlled by the worst indicator status. + // + // In the event that an indicator’s status is non-green, a list of impacts may + // be present in the indicator result which detail the functionalities that are + // negatively affected by the health issue. + // Each impact carries with it a severity level, an area of the system that is + // affected, and a simple description of the impact on the system. + // + // Some health indicators can determine the root cause of a health problem and + // prescribe a set of steps that can be performed in order to improve the health + // of the system. + // The root cause and remediation steps are encapsulated in a diagnosis. + // A diagnosis contains a cause detailing a root cause analysis, an action + // containing a brief description of the steps to take to fix the problem, the + // list of affected resources (if applicable), and a detailed step-by-step + // troubleshooting guide to fix the diagnosed problem. 
+ // + // NOTE: The health indicators perform root cause analysis of non-green health + // statuses. This can be computationally expensive when called frequently. + // When setting up automated polling of the API for health status, set verbose + // to false to disable the more expensive analysis logic. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report HealthReport core_health_report.NewHealthReport - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it + // Create or update a document in an index. + // + // Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html + // + // NOTE: You cannot use this API to send update requests for existing documents + // in a data stream. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request + // format, you must have the `create`, `index`, or `write` index privilege. + // * To add a document using the `POST /<target>/_doc/` request format, you must + // have the `create_doc`, `create`, `index`, or `write` index privilege. + // * To automatically create a data stream or index with this API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // NOTE: Replica shards might not all be started when an indexing operation + // returns successfully. + // By default, only the primary is required. Set `wait_for_active_shards` to + // change this default behavior.
+ // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. + // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. + // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Optimistic concurrency control** + // + // Index operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. 
+ // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). + // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards` + // request parameter.
+ // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. 
+ // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // + // **No operation (noop) updates** + // + // When updating a document by using this API, a new version of the document is + // always created even if the document hasn't changed. + // If this isn't acceptable use the `_update` API with `detect_noop` set to + // `true`. + // The `detect_noop` option isn't available on this API because it doesn’t fetch + // the old source and isn't able to compare it against the new source. + // + // There isn't a definitive rule for when noop updates aren't acceptable. + // It's a combination of lots of factors like how frequently your data source + // sends updates that are actually noops and how many queries per second + // Elasticsearch runs on the shard receiving the updates. + // + // **Versioning** + // + // Each indexed document is given a version number. + // By default, internal versioning is used that starts at 1 and increments with + // each update, deletes included. + // Optionally, the version number can be set to an external value (for example, + // if maintained in a database). + // To enable this functionality, `version_type` should be set to `external`. + // The value provided must be a numeric, long value greater than or equal to 0, + // and less than around `9.2e+18`. + // + // NOTE: Versioning is completely real time, and is not affected by the near + // real time aspects of search operations. + // If no version is provided, the operation runs without any version checks. + // + // When using the external version type, the system checks to see if the version + // number passed to the index request is greater than the version of the + // currently stored document. + // If true, the document will be indexed and the new version number used. 
+ // If the value provided is less than or equal to the stored document's version + // number, a version conflict will occur and the index operation will fail. For + // example: + // + // ``` + // PUT my-index-000001/_doc/1?version=2&version_type=external + // { + // "user": { + // "id": "elkbee" + // } + // } + // + // In this example, the operation will succeed since the supplied version of 2 + // is higher than the current document version of 1. + // If the document was already updated and its version was set to 2 or higher, + // the indexing command will fail and result in a conflict (409 HTTP status + // code). + // + // A nice side effect is that there is no need to maintain strict ordering of + // async indexing operations run as a result of changes to a source database, as + // long as version numbers from the source database are used. + // Even the simple case of updating the Elasticsearch index using data from a + // database is simplified if external versioning is used, as only the latest + // version will be used if the index operations arrive out of order. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create Index core_index.NewIndex // Get cluster info. - // Returns basic information about the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get basic build, version, and cluster information. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info Info core_info.NewInfo // Run a knn search. // @@ -1292,7 +2486,16 @@ type Core struct { // // The kNN search API supports restricting the search using a filter. // The search will return the top k documents that also match the filter query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // + // A kNN search response has the exact same structure as a search API response. 
+ // However, certain sections have a meaning specific to kNN search: + // + // * The document `_score` is determined by the similarity between the query and + // document vector. + // * The `hits.total` object contains the total number of nearest neighbor + // candidates considered, which is `num_candidates * num_shards`. The + // `hits.total.relation` will always be `eq`, indicating an exact value. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html KnnSearch core_knn_search.NewKnnSearch // Get multiple documents. // @@ -1301,7 +2504,24 @@ type Core struct { // document IDs in the request body. // To ensure fast responses, this multi get (mget) API responds with partial // results if one or more shards fail. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html + // + // **Filter source fields** + // + // By default, the `_source` field is returned for every document (if stored). + // Use the `_source` and `_source_include` or `source_exclude` attributes to + // filter what fields are returned for a particular document. + // You can include the `_source`, `_source_includes`, and `_source_excludes` + // query parameters in the request URI to specify the defaults to use when there + // are no per-document instructions. + // + // **Get stored fields** + // + // Use the `stored_fields` attribute to specify the set of stored fields you + // want to retrieve. + // Any requested fields that are not stored are ignored. + // You can include the `stored_fields` query parameter in the request URI to + // specify the defaults to use when there are no per-document instructions. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget Mget core_mget.NewMget // Run multiple searches. // @@ -1323,19 +2543,43 @@ type Core struct { // Each newline character may be preceded by a carriage return `\r`. 
// When sending requests to this endpoint the `Content-Type` header should be // set to `application/x-ndjson`. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch Msearch core_msearch.NewMsearch // Run multiple templated searches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // + // Run multiple templated searches with a single request. + // If you are providing a text file or text input to `curl`, use the + // `--data-binary` flag instead of `-d` to preserve newlines. + // For example: + // + // ``` + // $ cat requests + // { "index": "my-index" } + // { "id": "my-search-template", "params": { "query_string": "hello world", + // "from": 0, "size": 10 }} + // { "index": "my-other-index" } + // { "id": "my-other-search-template", "params": { "query_type": "match_all" }} + // + // $ curl -H "Content-Type: application/x-ndjson" -XGET + // localhost:9200/_msearch/template --data-binary "@requests"; echo + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template MsearchTemplate core_msearch_template.NewMsearchTemplate // Get multiple term vectors. // + // Get multiple term vectors with a single request. // You can specify existing documents by index and ID or provide artificial // documents in the body of the request. // You can specify the index in the request body or request URI. // The response contains a `docs` array with all the fetched termvectors. // Each element has the structure provided by the termvectors API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html + // + // **Artificial documents** + // + // You can also use `mtermvectors` to generate term vectors for artificial + // documents provided in the body of the request. + // The mapping used is determined by the specified `_index`. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors Mtermvectors core_mtermvectors.NewMtermvectors // Open a point in time. // @@ -1353,41 +2597,382 @@ type Core struct { // // A point in time must be opened explicitly before being used in search // requests. - // The `keep_alive` parameter tells Elasticsearch how long it should persist. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html + // + // A subsequent search request with the `pit` parameter must not specify + // `index`, `routing`, or `preference` values as these parameters are copied + // from the point in time. + // + // Just like regular searches, you can use `from` and `size` to page through + // point in time search results, up to the first 10,000 hits. + // If you want to retrieve more hits, use PIT with `search_after`. + // + // IMPORTANT: The open point in time request and each subsequent search request + // can return different identifiers; always use the most recently received ID + // for the next search request. + // + // When a PIT that contains shard failures is used in a search request, the + // missing shards are always reported in the search response as a + // `NoShardAvailableActionException` exception. + // To get rid of these exceptions, a new PIT needs to be created so that shards + // missing from the previous PIT can be handled, assuming they become available + // in the meantime. + // + // **Keeping point in time alive** + // + // The `keep_alive` parameter, which is passed to an open point in time request + // and search request, extends the time to live of the corresponding point in + // time. + // The value does not need to be long enough to process all data — it just needs + // to be long enough for the next request. + // + // Normally, the background merge process optimizes the index by merging + // together smaller segments to create new, bigger segments.
+ // Once the smaller segments are no longer needed they are deleted. + // However, open point-in-times prevent the old segments from being deleted + // since they are still in use. + // + // TIP: Keeping older segments alive means that more disk space and file handles + // are needed. + // Ensure that you have configured your nodes to have ample free file handles. + // + // Additionally, if a segment contains deleted or updated documents then the + // point in time must keep track of whether each document in the segment was + // live at the time of the initial search request. + // Ensure that your nodes have sufficient heap space if you have many open + // point-in-times on an index that is subject to ongoing deletes or updates. + // Note that a point-in-time doesn't prevent its associated indices from being + // deleted. + // You can check how many point-in-times (that is, search contexts) are open + // with the nodes stats API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Ping the cluster. - // Returns whether the cluster is running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get information about whether the cluster is running. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster Ping core_ping.NewPing // Create or update a script or search template. // Creates or updates a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script PutScript core_put_script.NewPutScript // Evaluate ranked search results. // // Evaluate the quality of ranked search results over a set of typical search // queries. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval RankEval core_rank_eval.NewRankEval // Reindex documents. - // Copies documents from a source to a destination. The source can be any - // existing index, alias, or data stream. The destination must differ from the - // source. For example, you cannot reindex a data stream into itself. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html + // + // Copy documents from a source to a destination. + // You can copy all documents to the destination index or reindex a subset of + // the documents. + // The source can be any existing index, alias, or data stream. + // The destination must differ from the source. + // For example, you cannot reindex a data stream into itself. + // + // IMPORTANT: Reindex requires `_source` to be enabled for all documents in the + // source. + // The destination should be configured as wanted before calling the reindex + // API. + // Reindex does not copy the settings from the source or its associated + // template. + // Mappings, shard counts, and replicas, for example, must be configured ahead + // of time. + // + // If the Elasticsearch security features are enabled, you must have the + // following security privileges: + // + // * The `read` index privilege for the source data stream, index, or alias. + // * The `write` index privilege for the destination data stream, index, or + // index alias. + // * To automatically create a data stream or index with a reindex API request, + // you must have the `auto_configure`, `create_index`, or `manage` index + // privilege for the destination data stream, index, or alias. + // * If reindexing from a remote cluster, the `source.remote.user` must have the + // `monitor` cluster privilege and the `read` index privilege for the source + // data stream, index, or alias. 
+ // + // If reindexing from a remote cluster, you must explicitly allow the remote + // host in the `reindex.remote.whitelist` setting. + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The `dest` element can be configured like the index API to control optimistic + // concurrency control. + // Omitting `version_type` or setting it to `internal` causes Elasticsearch to + // blindly dump documents into the destination, overwriting any that happen to + // have the same ID. + // + // Setting `version_type` to `external` causes Elasticsearch to preserve the + // `version` from the source, create any documents that are missing, and update + // any documents that have an older version in the destination than they do in + // the source. + // + // Setting `op_type` to `create` causes the reindex API to create only missing + // documents in the destination. + // All existing documents will cause a version conflict. + // + // IMPORTANT: Because data streams are append-only, any reindex request to a + // destination data stream must have an `op_type` of `create`. + // A reindex can only add new documents to a destination data stream. + // It cannot update existing documents in a destination data stream. + // + // By default, version conflicts abort the reindex process. + // To continue reindexing if there are conflicts, set the `conflicts` request + // body property to `proceed`. + // In this case, the response includes a count of the version conflicts that + // were encountered. + // Note that the handling of other error types is unaffected by the `conflicts` + // property. + // Additionally, if you opt to count version conflicts, the operation could + // attempt to reindex more documents from the source than `max_docs` until it + // has successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. 
+ // + // NOTE: The reindex API makes no effort to handle ID collisions. + // The last document written will "win" but the order isn't usually predictable + // so it is not a good idea to rely on this behavior. + // Instead, make sure that IDs are unique by using a script. + // + // **Running reindex asynchronously** + // + // If the request contains `wait_for_completion=false`, Elasticsearch performs + // some preflight checks, launches the request, and returns a task you can use + // to cancel or get the status of the task. + // Elasticsearch creates a record of this task as a document at + // `_tasks/<task_id>`. + // + // **Reindex from multiple sources** + // + // If you have many sources to reindex it is generally better to reindex them + // one at a time rather than using a glob pattern to pick up multiple sources. + // That way you can resume the process if there are any errors by removing the + // partially completed source and starting over. + // It also makes parallelizing the process fairly simple: split the list of + // sources to reindex and run each list in parallel. + // + // For example, you can use a bash script like this: + // + // ``` + // for index in i1 i2 i3 i4 i5; do + // curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty + // -d'{ + // "source": { + // "index": "'$index'" + // }, + // "dest": { + // "index": "'$index'-reindexed" + // } + // }' + // done + // ``` + // + // **Throttling** + // + // Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, + // for example) to throttle the rate at which reindex issues batches of index + // operations. + // Requests are throttled by padding each batch with a wait time. + // To turn off throttling, set `requests_per_second` to `-1`. + // + // The throttling is done by waiting between batches so that the scroll that + // reindex uses internally can be given a timeout that takes into account the + // padding.
+ // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single bulk request, large batch sizes cause + // Elasticsearch to create many requests and then wait for a while before + // starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Reindex supports sliced scroll to parallelize the reindexing process. + // This parallelization can improve efficiency and provide a convenient way to + // break the request down into smaller parts. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // You can slice a reindex request manually by providing a slice ID and total + // number of slices to each request. + // You can also let reindex automatically parallelize by using sliced scroll to + // slice on `_id`. + // The `slices` parameter specifies the number of slices to use. + // + // Adding `slices` to the reindex request just automates the manual process, + // creating sub-requests which means it has some quirks: + // + // * You can see these requests in the tasks API. These sub-requests are "child" + // tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. 
+ // * Due to the nature of `slices`, each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the previous point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being reindexed. + // * Each sub-request gets a slightly different snapshot of the source, though + // these are all taken at approximately the same time. + // + // If slicing automatically, setting `slices` to `auto` will choose a reasonable + // number for most indices. + // If slicing manually or otherwise tuning automatic slicing, use the following + // guidelines. + // + // Query performance is most efficient when the number of slices is equal to the + // number of shards in the index. + // If that number is large (for example, `500`), choose a lower number as too + // many slices will hurt performance. + // Setting slices higher than the number of shards generally does not improve + // efficiency and adds overhead. + // + // Indexing performance scales linearly across available resources with the + // number of slices. + // + // Whether query or indexing performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Modify documents during reindexing** + // + // Like `_update_by_query`, reindex operations support a script that modifies + // the document. + // Unlike `_update_by_query`, the script is allowed to modify the document's + // metadata. + // + // Just as in `_update_by_query`, you can set `ctx.op` to change the operation + // that is run on the destination. 
+ // For example, set `ctx.op` to `noop` if your script decides that the document + // doesn’t have to be indexed in the destination. This "no operation" will be + // reported in the `noop` counter in the response body. + // Set `ctx.op` to `delete` if your script decides that the document must be + // deleted from the destination. + // The deletion will be reported in the `deleted` counter in the response body. + // Setting `ctx.op` to anything else will return an error, as will setting any + // other field in `ctx`. + // + // Think of the possibilities! Just be careful; you are able to change: + // + // * `_id` + // * `_index` + // * `_version` + // * `_routing` + // + // Setting `_version` to `null` or clearing it from the `ctx` map is just like + // not sending the version in an indexing request. + // It will cause the document to be overwritten in the destination regardless of + // the version on the target or the version type you use in the reindex API. + // + // **Reindex from remote** + // + // Reindex supports reindexing from a remote Elasticsearch cluster. + // The `host` parameter must contain a scheme, host, port, and optional path. + // The `username` and `password` parameters are optional and when they are + // present the reindex operation will connect to the remote Elasticsearch node + // using basic authentication. + // Be sure to use HTTPS when using basic authentication or the password will be + // sent in plain text. + // There are a range of settings available to configure the behavior of the + // HTTPS connection. + // + // When using Elastic Cloud, it is also possible to authenticate against the + // remote cluster through the use of a valid API key. + // Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` + // setting. + // It can be set to a comma delimited list of allowed remote host and port + // combinations. + // Scheme is ignored; only the host and port are used. 
+ // For example:
+ //
+ // ```
+ // reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200,
+ // localhost:*]
+ // ```
+ //
+ // The list of allowed hosts must be configured on any nodes that will
+ // coordinate the reindex.
+ // This feature should work with remote clusters of any version of
+ // Elasticsearch.
+ // This should enable you to upgrade from any version of Elasticsearch to the
+ // current version by reindexing from a cluster of the old version.
+ //
+ // WARNING: Elasticsearch does not support forward compatibility across major
+ // versions.
+ // For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
+ //
+ // To enable queries sent to older versions of Elasticsearch, the `query`
+ // parameter is sent directly to the remote host without validation or
+ // modification.
+ //
+ // NOTE: Reindexing from remote clusters does not support manual or automatic
+ // slicing.
+ //
+ // Reindexing from a remote server uses an on-heap buffer that defaults to a
+ // maximum size of 100mb.
+ // If the remote index includes very large documents you'll need to use a
+ // smaller batch size.
+ // It is also possible to set the socket read timeout on the remote connection
+ // with the `socket_timeout` field and the connection timeout with the
+ // `connect_timeout` field.
+ // Both default to 30 seconds.
+ //
+ // **Configuring SSL parameters**
+ //
+ // Reindex from remote supports configurable SSL settings.
+ // These must be specified in the `elasticsearch.yml` file, with the exception
+ // of the secure settings, which you add in the Elasticsearch keystore.
+ // It is not possible to configure SSL in the body of the reindex request.
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex
Reindex core_reindex.NewReindex
// Throttle a reindex operation.
//
// Change the number of requests per second for a particular reindex operation.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html + // For example: + // + // ``` + // POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + // ``` + // + // Rethrottling that speeds up the query takes effect immediately. + // Rethrottling that slows down the query will take effect after completing the + // current batch. + // This behavior prevents scroll timeouts. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle // Render a search template. // // Render a search template as a search request body. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Run a script. + // // Runs a script and returns a result. + // Use this API to build and test scripts, such as when defining a script for a + // runtime field. + // This API requires very few dependencies and is especially useful if you don't + // have permissions to write documents on a cluster. + // + // The API uses several _contexts_, which control how scripts are run, what + // variables are available at runtime, and what the return type is. + // + // Each context requires a script, but additional parameters depend on the + // context you're using for that script. // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute // Run a scrolling search. @@ -1416,7 +3001,7 @@ type Core struct { // IMPORTANT: Results from a scrolling search reflect the state of the index at // the time of the initial search request. Subsequent indexing or document // changes only affect later search and scroll requests. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll Scroll core_scroll.NewScroll // Run a search. // @@ -1424,58 +3009,451 @@ type Core struct { // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // + // If the Elasticsearch security features are enabled, you must have the read + // index privilege for the target data stream, index, or alias. For + // cross-cluster search, refer to the documentation about configuring CCS + // privileges. + // To search a point in time (PIT) for an alias, you must have the `read` index + // privilege for the alias's data streams or indices. + // + // **Search slicing** + // + // When paging through a large number of documents, it can be helpful to split + // the search into multiple slices to consume them independently with the + // `slice` and `pit` properties. + // By default the splitting is done first on the shards, then locally on each + // shard. + // The local splitting partitions the shard into contiguous ranges based on + // Lucene document IDs. + // + // For instance if the number of shards is equal to 2 and you request 4 slices, + // the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + // assigned to the second shard. + // + // IMPORTANT: The same point-in-time ID should be used for all slices. + // If different PIT IDs are used, slices can overlap and miss documents. + // This situation can occur because the splitting criterion is based on Lucene + // document IDs, which are not stable across changes to the index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search Search core_search.NewSearch // Search a vector tile. 
//
// Search a vector tile for geospatial values.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html
+ // Before using this API, you should be familiar with the Mapbox vector tile
+ // specification.
+ // The API returns results as a binary mapbox vector tile.
+ //
+ // Internally, Elasticsearch translates a vector tile search API request into a
+ // search containing:
+ //
+ // * A `geo_bounding_box` query on the `<field>`. The query uses the
+ // `<zoom>/<x>/<y>` tile as a bounding box.
+ // * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The
+ // `grid_agg` parameter determines the aggregation type. The aggregation uses
+ // the `<zoom>/<x>/<y>` tile as a bounding box.
+ // * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only
+ // includes this aggregation if the `exact_bounds` parameter is `true`.
+ // * If the optional parameter `with_labels` is `true`, the internal search will
+ // include a dynamic runtime field that calls the `getLabelPosition` function of
+ // the geometry doc value. This enables the generation of new point features
+ // containing suggested geometry labels, so that, for example, multi-polygons
+ // will have only one label.
+ // + // For example, Elasticsearch may translate a vector tile search API request + // with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of + // `true` into the following search + // + // ``` + // GET my-index/_search + // { + // "size": 10000, + // "query": { + // "geo_bounding_box": { + // "my-geo-field": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "aggregations": { + // "grid": { + // "geotile_grid": { + // "field": "my-geo-field", + // "precision": 11, + // "size": 65536, + // "bounds": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "bounds": { + // "geo_bounds": { + // "field": "my-geo-field", + // "wrap_longitude": false + // } + // } + // } + // } + // ``` + // + // The API returns results as a binary Mapbox vector tile. + // Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the + // tile contains three layers: + // + // * A `hits` layer containing a feature for each `` value matching the + // `geo_bounding_box` query. + // * An `aggs` layer containing a feature for each cell of the `geotile_grid` or + // `geohex_grid`. The layer only contains features for cells with matching data. + // * A meta layer containing: + // * A feature containing a bounding box. By default, this is the bounding box + // of the tile. + // * Value ranges for any sub-aggregations on the `geotile_grid` or + // `geohex_grid`. + // * Metadata for the search. + // + // The API only returns features that can display at its zoom level. + // For example, if a polygon feature has no area at its zoom level, the API + // omits it. + // The API returns errors as UTF-8 encoded JSON. 
+ //
+ // IMPORTANT: You can specify several options for this API as either a query
+ // parameter or request body parameter.
+ // If you specify both parameters, the query parameter takes precedence.
+ //
+ // **Grid precision for geotile**
+ //
+ // For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles
+ // for lower zoom levels.
+ // `grid_precision` represents the additional zoom levels available through
+ // these cells. The final precision is computed as follows: `<zoom> +
+ // grid_precision`.
+ // For example, if `<zoom>` is 7 and `grid_precision` is 8, then the
+ // `geotile_grid` aggregation will use a precision of 15.
+ // The maximum final precision is 29.
+ // The `grid_precision` also determines the number of cells for the grid as
+ // follows: `(2^grid_precision) x (2^grid_precision)`.
+ // For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+ // The `aggs` layer only contains features for cells with matching data.
+ //
+ // **Grid precision for geohex**
+ //
+ // For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and
+ // `grid_precision` to calculate a final precision as follows: `<zoom> +
+ // grid_precision`.
+ //
+ // This precision determines the H3 resolution of the hexagonal cells produced
+ // by the `geohex` aggregation.
+ // The following table maps the H3 resolution for each precision.
+ // For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+ // At a precision of 6, hexagonal cells have an H3 resolution of 2.
+ // If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+ // At a precision of 7, hexagonal cells have an H3 resolution of 3.
+ // + // | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | + // | --------- | ---------------- | ------------- | ----------------| ----- | + // | 1 | 4 | 0 | 122 | 30.5 | + // | 2 | 16 | 0 | 122 | 7.625 | + // | 3 | 64 | 1 | 842 | 13.15625 | + // | 4 | 256 | 1 | 842 | 3.2890625 | + // | 5 | 1024 | 2 | 5882 | 5.744140625 | + // | 6 | 4096 | 2 | 5882 | 1.436035156 | + // | 7 | 16384 | 3 | 41162 | 2.512329102 | + // | 8 | 65536 | 3 | 41162 | 0.6280822754 | + // | 9 | 262144 | 4 | 288122 | 1.099098206 | + // | 10 | 1048576 | 4 | 288122 | 0.2747745514 | + // | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | + // | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | + // | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | + // | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | + // | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | + // | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | + // | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | + // | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | + // | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | + // | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | + // | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | + // | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | + // | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | + // | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | + // | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | + // | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | + // | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | + // | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | + // | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + // + // Hexagonal cells don't align perfectly on a vector tile. + // Some cells may intersect more than one vector tile. 
+ // To compute the H3 resolution for each precision, Elasticsearch compares the + // average density of hexagonal bins at each resolution with the average density + // of tile bins at each zoom level. + // Elasticsearch uses the H3 resolution that is closest to the corresponding + // geotile density. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt SearchMvt core_search_mvt.NewSearchMvt // Get the search shards. // // Get the indices and shards that a search request would be run against. // This information can be useful for working out issues or planning // optimizations with routing and shard preferences. - // When filtered aliases are used, the filter is returned as part of the indices - // section. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html + // When filtered aliases are used, the filter is returned as part of the + // `indices` section. + // + // If the Elasticsearch security features are enabled, you must have the + // `view_index_metadata` or `manage` index privilege for the target data stream, + // index, or alias. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards SearchShards core_search_shards.NewSearchShards // Run a search with a search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template SearchTemplate core_search_template.NewSearchTemplate // Get terms in an index. // // Discover terms that match a partial string in an index. - // This "terms enum" API is designed for low-latency look-ups used in - // auto-complete scenarios. - // - // If the `complete` property in the response is false, the returned terms set - // may be incomplete and should be treated as approximate. - // This can occur due to a few reasons, such as a request timeout or a node - // error. 
+ // This API is designed for low-latency look-ups used in auto-complete + // scenarios. // - // NOTE: The terms enum API may return terms from deleted documents. Deleted + // > info + // > The terms enum API may return terms from deleted documents. Deleted // documents are initially only marked as deleted. It is not until their // segments are merged that documents are actually deleted. Until that happens, // the terms enum API will return terms from these documents. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum TermsEnum core_terms_enum.NewTermsEnum // Get term vector information. // // Get information and statistics about terms in the fields of a particular // document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html + // + // You can retrieve term vectors for documents stored in the index or for + // artificial documents passed in the body of the request. + // You can specify the fields you are interested in through the `fields` + // parameter or by adding the fields to the request body. + // For example: + // + // ``` + // GET /my-index-000001/_termvectors/1?fields=message + // ``` + // + // Fields can be specified using wildcards, similar to the multi match query. + // + // Term vectors are real-time by default, not near real-time. + // This can be changed by setting `realtime` parameter to `false`. + // + // You can request three types of values: _term information_, _term statistics_, + // and _field statistics_. + // By default, all term information and field statistics are returned for all + // fields but term statistics are excluded. 
+ // + // **Term information** + // + // * term frequency in the field (always returned) + // * term positions (`positions: true`) + // * start and end offsets (`offsets: true`) + // * term payloads (`payloads: true`), as base64 encoded bytes + // + // If the requested information wasn't stored in the index, it will be computed + // on the fly if possible. + // Additionally, term vectors could be computed for documents not even existing + // in the index, but instead provided by the user. + // + // > warn + // > Start and end offsets assume UTF-16 encoding is being used. If you want to + // use these offsets in order to get the original text that produced this token, + // you should make sure that the string you are taking a sub-string of is also + // encoded using UTF-16. + // + // **Behaviour** + // + // The term and field statistics are not accurate. + // Deleted documents are not taken into account. + // The information is only retrieved for the shard the requested document + // resides in. + // The term and field statistics are therefore only useful as relative measures + // whereas the absolute numbers have no meaning in this context. + // By default, when requesting term vectors of artificial documents, a shard to + // get the statistics from is randomly selected. + // Use `routing` only to hit a particular shard. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors Termvectors core_termvectors.NewTermvectors // Update a document. - // Updates a document by running a script or passing a partial document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html + // + // Update a document by running a script or passing a partial document. + // + // If the Elasticsearch security features are enabled, you must have the `index` + // or `write` index privilege for the target index or index alias. + // + // The script can update, delete, or skip modifying the document. 
+ // The API also supports passing a partial document, which is merged into the + // existing document. + // To fully replace an existing document, use the index API. + // This operation: + // + // * Gets the document (collocated with the shard) from the index. + // * Runs the specified script. + // * Indexes the result. + // + // The document must still be reindexed, but using this API removes some network + // roundtrips and reduces chances of version conflicts between the GET and the + // index operation. + // + // The `_source` field must be enabled to use this API. + // In addition to `_source`, you can access the following variables through the + // `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + // current timestamp). + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update Update core_update.NewUpdate // Update documents. // Updates documents that match the specified query. // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `index` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // + // When you submit an update by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and updates + // matching documents using internal versioning. + // When the versions match, the document is updated and the version number is + // incremented. 
+ // If a document changes between the time that the snapshot is taken and the + // update operation is processed, it results in a version conflict and the + // operation fails. + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts, the operation could attempt + // to update more documents from the source than `max_docs` until it has + // successfully updated `max_docs` documents or it has gone through every + // document in the source query. + // + // NOTE: Documents with a version equal to 0 cannot be updated using update by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing an update by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents. + // A bulk update request is performed for each batch of matching documents. + // Any query or update failures cause the update by query request to fail and + // the failures are shown in the response. + // Any update requests that completed successfully still stick, they are not + // rolled back. + // + // **Throttling update requests** + // + // To control the rate at which update by query issues batches of update + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to turn off throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. 
+ // By default the batch size is 1000, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single _bulk request, large batch sizes cause + // Elasticsearch to create many requests and wait before starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Update by query supports sliced scroll to parallelize the update process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` chooses a reasonable number for most data streams + // and indices. + // This setting will use one slice per shard, up to a certain limit. + // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // + // Adding `slices` to `_update_by_query` just automates the manual process of + // creating sub-requests, which means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with slices will cancel each sub-request. + // * Due to the nature of slices each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. 
+ // * Parameters like `requests_per_second` and `max_docs` on a request with + // slices are distributed proportionally to each sub-request. Combine that with + // the point above about distribution being uneven and you should conclude that + // using `max_docs` with `slices` might not result in exactly `max_docs` + // documents being updated. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many slices hurts + // performance. Setting slices higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Update performance scales linearly across available resources with the + // number of slices. + // + // Whether query or update performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Update the document source** + // + // Update by query supports scripts to update the document source. + // As with the update API, you can set `ctx.op` to change the operation that is + // performed. + // + // Set `ctx.op = "noop"` if your script decides that it doesn't have to make any + // changes. + // The update by query operation skips updating the document and increments the + // `noop` counter. + // + // Set `ctx.op = "delete"` if your script decides that the document should be + // deleted. + // The update by query operation deletes the document and increments the + // `deleted` counter. + // + // Update by query supports only `index`, `noop`, and `delete`. + // Setting `ctx.op` to anything else is an error. + // Setting any other field in `ctx` is an error. 
+ // This API enables you to only modify the source of matching documents; you + // cannot move them. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query UpdateByQuery core_update_by_query.NewUpdateByQuery // Throttle an update by query operation. // @@ -1484,19 +3462,18 @@ type Core struct { // Rethrottling that speeds up the query takes effect immediately but // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } type DanglingIndices struct { // Delete a dangling index. - // // If Elasticsearch encounters index data that is absent from the current // cluster state, those indices are considered to be dangling. // For example, this can happen if you delete more than // `cluster.indices.tombstones.size` indices while an Elasticsearch node is // offline. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index DeleteDanglingIndex dangling_indices_delete_dangling_index.NewDeleteDanglingIndex // Import a dangling index. // @@ -1505,7 +3482,7 @@ type DanglingIndices struct { // For example, this can happen if you delete more than // `cluster.indices.tombstones.size` indices while an Elasticsearch node is // offline. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index ImportDanglingIndex dangling_indices_import_dangling_index.NewImportDanglingIndex // Get the dangling indices. // @@ -1516,201 +3493,518 @@ type DanglingIndices struct { // offline. // // Use this API to list dangling indices, which you can then import or delete. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices ListDanglingIndices dangling_indices_list_dangling_indices.NewListDanglingIndices } type Enrich struct { // Delete an enrich policy. // Deletes an existing enrich policy and its enrich index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy DeletePolicy enrich_delete_policy.NewDeletePolicy - // Creates the enrich index for an existing enrich policy. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html + // Run an enrich policy. + // Create the enrich index for an existing enrich policy. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy ExecutePolicy enrich_execute_policy.NewExecutePolicy // Get an enrich policy. // Returns information about an enrich policy. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy GetPolicy enrich_get_policy.NewGetPolicy // Create an enrich policy. // Creates an enrich policy. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy PutPolicy enrich_put_policy.NewPutPolicy // Get enrich stats. // Returns enrich coordinator statistics and information about enrich policies // that are currently executing. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats Stats enrich_stats.NewStats } type Eql struct { - // Deletes an async EQL search or a stored synchronous EQL search. + // Delete an async EQL search. + // Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete Delete eql_delete.NewDelete - // Returns the current status and available results for an async EQL search or a + // Get async EQL search results. + // Get the current status and available results for an async EQL search or a // stored synchronous EQL search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-search-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get Get eql_get.NewGet - // Returns the current status for an async EQL search or a stored synchronous - // EQL search without returning results. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-status-api.html + // Get the async EQL status. + // Get the current status for an async EQL search or a stored synchronous EQL + // search without returning results. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status GetStatus eql_get_status.NewGetStatus - // Returns results matching a query expressed in Event Query Language (EQL) - // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html + // Get EQL search results. + // Returns search results for an Event Query Language (EQL) query. + // EQL assumes each document in a data stream or index corresponds to an event. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search Search eql_search.NewSearch } type Esql struct { - // Executes an ESQL request asynchronously - // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-api.html + // Run an async ES|QL query. + // Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its + // progress, and retrieve results when they become available. + // + // The API accepts the same parameters and request body as the synchronous query + // API, along with additional async related properties. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query AsyncQuery esql_async_query.NewAsyncQuery - // Executes an ES|QL request + // Delete an async ES|QL query. + // If the query is still running, it is cancelled. + // Otherwise, the stored results are deleted. + // + // If the Elasticsearch security features are enabled, only the following users + // can use this API to delete a query: + // + // * The authenticated user that submitted the original query request + // * Users with the `cancel_task` cluster privilege + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete + AsyncQueryDelete esql_async_query_delete.NewAsyncQueryDelete + // Get async ES|QL query results. + // Get the current status and available results or stored results for an ES|QL + // asynchronous query. 
+ // If the Elasticsearch security features are enabled, only the user who first + // submitted the ES|QL query can retrieve the results using this API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get + AsyncQueryGet esql_async_query_get.NewAsyncQueryGet + // Stop async ES|QL query. + // + // This API interrupts the query execution and returns the results so far. + // If the Elasticsearch security features are enabled, only the user who first + // submitted the ES|QL query can stop it. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html + AsyncQueryStop esql_async_query_stop.NewAsyncQueryStop + // Run an ES|QL query. + // Get search results for an ES|QL (Elasticsearch query language) query. // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html Query esql_query.NewQuery } type Features struct { - // Gets a list of features which can be included in snapshots using the - // feature_states field when creating a snapshot - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html + // Get the features. + // Get a list of features that can be included in snapshots using the + // `feature_states` field when creating a snapshot. + // You can use this API to determine which feature states to include when taking + // a snapshot. + // By default, all feature states are included in a snapshot if that snapshot + // includes the global state, or none if it does not. + // + // A feature state includes one or more system indices necessary for a given + // feature to function. + // In order to ensure data integrity, all system indices that comprise a feature + // state are snapshotted and restored together. + // + // The features listed by this API are a combination of built-in features and + // features defined by plugins. 
+ // In order for a feature state to be listed in this API and recognized as a + // valid feature state by the create snapshot API, the plugin that defines that + // feature must be installed on the master node. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features GetFeatures features_get_features.NewGetFeatures - // Resets the internal state of features, usually by deleting system indices - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Reset the features. + // Clear all of the state information stored in system indices by Elasticsearch + // features, including the security and machine learning indices. + // + // WARNING: Intended for development and testing use only. Do not reset features + // on a production cluster. + // + // Return a cluster to the same state as a new installation by resetting the + // feature state for all Elasticsearch features. + // This deletes all state information stored in system indices. + // + // The response code is HTTP 200 if the state is successfully reset for all + // features. + // It is HTTP 500 if the reset operation failed for any feature. + // + // Note that select features might provide a way to reset particular system + // indices. + // Using this API resets all features, both those that are built-in and + // implemented as plugins. + // + // To list the features that will be affected, use the get features API. + // + // IMPORTANT: The features installed on the node you submit this request to are + // the features that will be reset. Run on the master node if you have any + // doubts about which plugins are installed on individual nodes. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features ResetFeatures features_reset_features.NewResetFeatures } type Fleet struct { - // Returns the current global checkpoints for an index. 
This API is design for - // internal use by the fleet server project. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html - GlobalCheckpoints fleet_global_checkpoints.NewGlobalCheckpoints - // Executes several [fleet - // searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) - // with a single API request. - // The API follows the same structure as the [multi - // search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) - // API. However, similar to the fleet search API, it - // supports the wait_for_checkpoints parameter. + // Get global checkpoints. // + // Get the current global checkpoints for an index. + // This API is designed for internal use by the Fleet server project. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet + GlobalCheckpoints fleet_global_checkpoints.NewGlobalCheckpoints + // Run multiple Fleet searches. + // Run several Fleet searches with a single API request. + // The API follows the same structure as the multi search API. + // However, similar to the Fleet search API, it supports the + // `wait_for_checkpoints` parameter. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch Msearch fleet_msearch.NewMsearch // Creates a secret stored by Fleet. // PostSecret fleet_post_secret.NewPostSecret - // The purpose of the fleet search api is to provide a search api where the - // search will only be executed - // after provided checkpoint has been processed and is visible for searches + // Run a Fleet search. + // The purpose of the Fleet search API is to provide an API where the search + // will be run only + // after the provided checkpoint has been processed and is visible for searches // inside of Elasticsearch. 
- // + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search Search fleet_search.NewSearch } type Graph struct { - // Extracts and summarizes information about the documents and terms in an + // Explore graph analytics. + // Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html + // The easiest way to understand the behavior of this API is to use the Graph UI + // to explore connections. + // An initial request to the `_explore` API contains a seed query that + // identifies the documents of interest and specifies the fields that define the + // vertices and connections you want to include in the graph. + // Subsequent requests enable you to spider out from one more vertices of + // interest. + // You can exclude vertices that have already been returned. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph Explore graph_explore.NewExplore } type Ilm struct { - // Deletes the specified lifecycle policy definition. You cannot delete policies - // that are currently in use. If the policy is being used to manage any indices, - // the request fails and returns an error. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html + // Delete a lifecycle policy. + // You cannot delete policies that are currently in use. If the policy is being + // used to manage any indices, the request fails and returns an error. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle DeleteLifecycle ilm_delete_lifecycle.NewDeleteLifecycle - // Retrieves information about the index’s current lifecycle state, such as the - // currently executing phase, action, and step. Shows when the index entered - // each one, the definition of the running phase, and information about any - // failures. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html + // Explain the lifecycle state. + // Get the current lifecycle status for one or more indices. + // For data streams, the API retrieves the current lifecycle status for the + // stream's backing indices. + // + // The response indicates when the index entered each lifecycle state, provides + // the definition of the running phase, and information about any failures. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle ExplainLifecycle ilm_explain_lifecycle.NewExplainLifecycle - // Retrieves a lifecycle policy. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html + // Get lifecycle policies. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle GetLifecycle ilm_get_lifecycle.NewGetLifecycle - // Retrieves the current index lifecycle management (ILM) status. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html + // Get the ILM status. + // + // Get the current index lifecycle management status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status GetStatus ilm_get_status.NewGetStatus - // Switches the indices, ILM policies, and legacy, composable and component - // templates from using custom node attributes and - // attribute-based allocation filters to using data tiers, and optionally - // deletes one legacy index template.+ + // Migrate to data tiers routing. + // Switch the indices, ILM policies, and legacy, composable, and component + // templates from using custom node attributes and attribute-based allocation + // filters to using data tiers. + // Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html + // + // Migrating away from custom node attributes routing can be manually performed. + // This API provides an automated way of performing three out of the four manual + // steps listed in the migration guide: + // + // 1. Stop setting the custom hot attribute on new indices. + // 1. Remove custom allocation settings from existing ILM policies. + // 1. Replace custom allocation settings from existing indices with the + // corresponding tier preference. + // + // ILM must be stopped before performing the migration. + // Use the stop ILM and get ILM status APIs to wait until the reported operation + // mode is `STOPPED`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers MigrateToDataTiers ilm_migrate_to_data_tiers.NewMigrateToDataTiers - // Manually moves an index into the specified step and executes that step. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html + // Move to a lifecycle step. + // Manually move an index into a specific step in the lifecycle policy and run + // that step. + // + // WARNING: This operation can result in the loss of data. Manually moving an + // index into a specific step runs that step even if it has already been + // performed. This is a potentially destructive action and this should be + // considered an expert level API. + // + // You must specify both the current step and the step to be executed in the + // body of the request. + // The request will fail if the current step does not match the step currently + // running for the index + // This is to prevent the index from being moved from an unexpected step into + // the next step. + // + // When specifying the target (`next_step`) to which the index will be moved, + // either the name or both the action and name fields are optional. 
+ // If only the phase is specified, the index will move to the first step of the + // first action in the target phase. + // If the phase and action are specified, the index will move to the first step + // of the specified action in the specified phase. + // Only actions specified in the ILM policy are considered valid. + // An index cannot move to a step that is not part of its policy. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step MoveToStep ilm_move_to_step.NewMoveToStep - // Creates a lifecycle policy. If the specified policy exists, the policy is - // replaced and the policy version is incremented. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html + // Create or update a lifecycle policy. + // If the specified policy exists, it is replaced and the policy version is + // incremented. + // + // NOTE: Only the latest version of the policy is stored, you cannot revert to + // previous versions. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle PutLifecycle ilm_put_lifecycle.NewPutLifecycle - // Removes the assigned lifecycle policy and stops managing the specified index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html + // Remove policies from an index. + // Remove the assigned lifecycle policies from an index or a data stream's + // backing indices. + // It also stops managing the indices. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy RemovePolicy ilm_remove_policy.NewRemovePolicy - // Retries executing the policy for an index that is in the ERROR step. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html + // Retry a policy. + // Retry running the lifecycle policy for an index that is in the ERROR step. + // The API sets the policy back to the step where the error occurred and runs + // the step. 
+ // Use the explain lifecycle state API to determine whether an index is in the + // ERROR step. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry Retry ilm_retry.NewRetry - // Start the index lifecycle management (ILM) plugin. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html + // Start the ILM plugin. + // Start the index lifecycle management plugin if it is currently stopped. + // ILM is started automatically when the cluster is formed. + // Restarting ILM is necessary only when it has been stopped using the stop ILM + // API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start Start ilm_start.NewStart - // Halts all lifecycle management operations and stops the index lifecycle - // management (ILM) plugin - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html + // Stop the ILM plugin. + // Halt all lifecycle management operations and stop the index lifecycle + // management plugin. + // This is useful when you are performing maintenance on the cluster and need to + // prevent ILM from performing any actions on your indices. + // + // The API returns as soon as the stop request has been acknowledged, but the + // plugin might continue to run until in-progress operations complete and the + // plugin can be safely stopped. + // Use the get ILM status API to check whether ILM is running. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop Stop ilm_stop.NewStop } type Indices struct { // Add an index block. - // Limits the operations allowed on an index by blocking specific operation - // types. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html + // + // Add an index block to an index. + // Index blocks limit the operations allowed on an index by blocking specific + // operation types. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block AddBlock indices_add_block.NewAddBlock // Get tokens from text analysis. - // The analyze API performs - // [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) - // on a text string and returns the resulting tokens. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html + // The analyze API performs analysis on a text string and returns the resulting + // tokens. + // + // Generating excessive amount of tokens may cause a node to run out of memory. + // The `index.analyze.max_token_count` setting enables you to limit the number + // of tokens that can be produced. + // If more than this limit of tokens gets generated, an error occurs. + // The `_analyze` endpoint without a specified index will always use `10000` as + // its limit. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze Analyze indices_analyze.NewAnalyze - // Clears the caches of one or more indices. - // For data streams, the API clears the caches of the stream’s backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html + // Cancel a migration reindex operation. + // + // Cancel a migration reindex attempt for a data stream or index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + CancelMigrateReindex indices_cancel_migrate_reindex.NewCancelMigrateReindex + // Clear the cache. + // Clear the cache of one or more indices. + // For data streams, the API clears the caches of the stream's backing indices. + // + // By default, the clear cache API clears all caches. + // To clear only specific caches, use the `fielddata`, `query`, or `request` + // parameters. + // To clear the cache only of specific fields, use the `fields` parameter. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache ClearCache indices_clear_cache.NewClearCache - // Clones an existing index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html + // Clone an index. + // Clone an existing index into a new index. + // Each original primary shard is cloned into a new primary shard in the new + // index. + // + // IMPORTANT: Elasticsearch does not apply index templates to the resulting + // index. + // The API also does not copy index metadata from the original index. + // Index metadata includes aliases, index lifecycle management phase + // definitions, and cross-cluster replication (CCR) follower information. + // For example, if you clone a CCR follower index, the resulting clone will not + // be a follower index. + // + // The clone API copies most index settings from the source index to the + // resulting index, with the exception of `index.number_of_replicas` and + // `index.auto_expand_replicas`. + // To set the number of replicas in the resulting index, configure these + // settings in the clone request. + // + // Cloning works as follows: + // + // * First, it creates a new target index with the same definition as the source + // index. + // * Then it hard-links segments from the source index into the target index. If + // the file system does not support hard-linking, all segments are copied into + // the new index, which is a much more time consuming process. + // * Finally, it recovers the target index as though it were a closed index + // which had just been re-opened. + // + // IMPORTANT: Indices can only be cloned if they meet the following + // requirements: + // + // * The index must be marked as read-only and have a cluster health status of + // green. + // * The target index must not exist. + // * The source index must have the same number of primary shards as the target + // index. 
+ // * The node handling the clone process must have sufficient free disk space to + // accommodate a second copy of the existing index. + // + // The current write index on a data stream cannot be cloned. + // In order to clone the current write index, the data stream must first be + // rolled over so that a new write index is created and then the previous write + // index can be cloned. + // + // NOTE: Mappings cannot be specified in the `_clone` request. The mappings of + // the source index will be used for the target index. + // + // **Monitor the cloning process** + // + // The cloning process can be monitored with the cat recovery API or the cluster + // health API can be used to wait until all primary shards have been allocated + // by setting the `wait_for_status` parameter to `yellow`. + // + // The `_clone` API returns as soon as the target index has been added to the + // cluster state, before any shards have been allocated. + // At this point, all shards are in the state unassigned. + // If, for any reason, the target index can't be allocated, its primary shard + // will remain unassigned until it can be allocated on that node. + // + // Once the primary shard is allocated, it moves to state initializing, and the + // clone process begins. + // When the clone operation completes, the shard will become active. + // At that point, Elasticsearch will try to allocate any replicas and may decide + // to relocate the primary shard to another node. + // + // **Wait for active shards** + // + // Because the clone operation creates a new index to clone the shards to, the + // wait for active shards setting on index creation applies to the clone index + // action as well. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone Clone indices_clone.NewClone - // Closes an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-close.html + // Close an index. 
+ // A closed index is blocked for read or write operations and does not allow all + // operations that opened indices allow. + // It is not possible to index documents or to search for documents in a closed + // index. + // Closed indices do not have to maintain internal data structures for indexing + // or searching documents, which results in a smaller overhead on the cluster. + // + // When opening or closing an index, the master node is responsible for + // restarting the index shards to reflect the new state of the index. + // The shards will then go through the normal recovery process. + // The data of opened and closed indices is automatically replicated by the + // cluster to ensure that enough shard copies are safely kept around at all + // times. + // + // You can open and close multiple indices. + // An error is thrown if the request explicitly refers to a missing index. + // This behaviour can be turned off using the `ignore_unavailable=true` + // parameter. + // + // By default, you must explicitly name the indices you are opening or closing. + // To open or close indices with `_all`, `*`, or other wildcard expressions, + // change the` action.destructive_requires_name` setting to `false`. This + // setting can also be changed with the cluster update settings API. + // + // Closed indices consume a significant amount of disk-space which can cause + // problems in managed environments. + // Closing indices can be turned off with the cluster settings API by setting + // `cluster.indices.close.enable` to `false`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close Close indices_close.NewClose // Create an index. - // Creates a new index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html + // You can use the create index API to add a new index to an Elasticsearch + // cluster. + // When creating an index, you can specify the following: + // + // * Settings for the index. 
+ // * Mappings for fields in the index. + // * Index aliases + // + // **Wait for active shards** + // + // By default, index creation will only return a response to the client when the + // primary copies of each shard have been started, or the request times out. + // The index creation response will indicate what happened. + // For example, `acknowledged` indicates whether the index was successfully + // created in the cluster, `while shards_acknowledged` indicates whether the + // requisite number of shard copies were started for each shard in the index + // before timing out. + // Note that it is still possible for either `acknowledged` or + // `shards_acknowledged` to be `false`, but for the index creation to be + // successful. + // These values simply indicate whether the operation completed before the + // timeout. + // If `acknowledged` is false, the request timed out before the cluster state + // was updated with the newly created index, but it probably will be created + // sometime soon. + // If `shards_acknowledged` is false, then the request timed out before the + // requisite number of shards were started (by default just the primaries), even + // if the cluster state was successfully updated to reflect the newly created + // index (that is to say, `acknowledged` is `true`). + // + // You can change the default of only waiting for the primary shards to start + // through the index setting `index.write.wait_for_active_shards`. + // Note that changing this setting will also affect the `wait_for_active_shards` + // value on all subsequent write operations. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create Create indices_create.NewCreate // Create a data stream. - // Creates a data stream. + // // You must have a matching index template with data stream enabled. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream CreateDataStream indices_create_data_stream.NewCreateDataStream + // Create an index from a source index. + // + // Copy the mappings and settings from the source index to a destination index + // while allowing request settings and mappings to override the source values. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + CreateFrom indices_create_from.NewCreateFrom // Get data stream stats. - // Retrieves statistics for one or more data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // + // Get statistics for one or more data streams. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 DataStreamsStats indices_data_streams_stats.NewDataStreamsStats // Delete indices. - // Deletes one or more indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html + // Deleting an index deletes its documents, shards, and metadata. + // It does not delete related Kibana components, such as data views, + // visualizations, or dashboards. + // + // You cannot delete the current write index of a data stream. + // To delete the index, you must roll over the data stream so a new write index + // is created. + // You can then use the delete index API to delete the previous write index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete Delete indices_delete.NewDelete // Delete an alias. // Removes a data stream or index from an alias. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias DeleteAlias indices_delete_alias.NewDeleteAlias // Delete data stream lifecycles. // Removes the data stream lifecycle from a data stream, rendering it not // managed by the data stream lifecycle. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-delete-lifecycle.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle DeleteDataLifecycle indices_delete_data_lifecycle.NewDeleteDataLifecycle // Delete data streams. // Deletes one or more data streams and their backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream DeleteDataStream indices_delete_data_stream.NewDeleteDataStream // Delete an index template. // The provided may contain multiple template names separated @@ -1718,94 +4012,265 @@ type Indices struct { // names are specified then there is no wildcard support and the provided names // should match completely with // existing templates. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template DeleteIndexTemplate indices_delete_index_template.NewDeleteIndexTemplate - // Deletes a legacy index template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template-v1.html + // Delete a legacy index template. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template DeleteTemplate indices_delete_template.NewDeleteTemplate - // Analyzes the disk usage of each field of an index or data stream. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html + // Analyze the index disk usage. + // Analyze the disk usage of each field of an index or data stream. + // This API might not support indices created in previous Elasticsearch + // versions. + // The result of a small index can be inaccurate as some parts of an index might + // not be analyzed by the API. + // + // NOTE: The total size of fields of the analyzed shards of the index in the + // response is usually smaller than the index `store_size` value because some + // small metadata files are ignored and some parts of data files might not be + // scanned by the API. + // Since stored fields are stored together in a compressed format, the sizes of + // stored fields are also estimates and can be inaccurate. + // The stored size of the `_id` field is likely underestimated while the + // `_source` field is overestimated. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage DiskUsage indices_disk_usage.NewDiskUsage - // Aggregates a time series (TSDS) index and stores pre-computed statistical + // Downsample an index. + // Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-downsample-data-stream.html + // For example, a TSDS index that contains metrics sampled every 10 seconds can + // be downsampled to an hourly index. + // All documents within an hour interval are summarized and stored as a single + // document in the downsample index. + // + // NOTE: Only indices in a time series data stream are supported. + // Neither field nor document level security can be defined on the source index. + // The source index must be read only (`index.blocks.write: true`). 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample Downsample indices_downsample.NewDownsample // Check indices. - // Checks if one or more indices, index aliases, or data streams exist. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html + // Check if one or more indices, index aliases, or data streams exist. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists Exists indices_exists.NewExists // Check aliases. - // Checks if one or more data stream or index aliases exist. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // + // Check if one or more data stream or index aliases exist. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias ExistsAlias indices_exists_alias.NewExistsAlias - // Returns information about whether a particular index template exists. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html + // Check index templates. + // + // Check whether index templates exist. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template ExistsIndexTemplate indices_exists_index_template.NewExistsIndexTemplate // Check existence of index templates. - // Returns information about whether a particular index template exists. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-template-exists-v1.html + // Get information about whether index templates exist. + // Index templates define settings, mappings, and aliases that can be applied + // automatically to new indices. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template ExistsTemplate indices_exists_template.NewExistsTemplate // Get the status for a data stream lifecycle. - // Retrieves information about an index or data stream’s current data stream - // lifecycle status, such as time since index creation, time since rollover, the - // lifecycle configuration managing the index, or any errors encountered during - // lifecycle execution. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html + // Get information about an index or data stream's current data stream lifecycle + // status, such as time since index creation, time since rollover, the lifecycle + // configuration managing the index, or any errors encountered during lifecycle + // execution. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle ExplainDataLifecycle indices_explain_data_lifecycle.NewExplainDataLifecycle - // Returns field usage information for each shard and field of an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html + // Get field usage stats. + // Get field usage information for each shard and field of an index. + // Field usage statistics are automatically captured when queries are running on + // a cluster. + // A shard-level search request that accesses a given field, even if multiple + // times during that request, is counted as a single use. + // + // The response body reports the per-shard usage count of the data structures + // that back the fields in the index. + // A given request will increment each count by a maximum value of 1, even if + // the request accesses the same field multiple times. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats FieldUsageStats indices_field_usage_stats.NewFieldUsageStats - // Flushes one or more data streams or indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html + // Flush data streams or indices. + // Flushing a data stream or index is the process of making sure that any data + // that is currently only stored in the transaction log is also permanently + // stored in the Lucene index. + // When restarting, Elasticsearch replays any unflushed operations from the + // transaction log into the Lucene index to bring it back into the state that it + // was in before the restart. + // Elasticsearch automatically triggers flushes as needed, using heuristics that + // trade off the size of the unflushed transaction log against the cost of + // performing each flush. + // + // After each operation has been flushed it is permanently stored in the Lucene + // index. + // This may mean that there is no need to maintain an additional copy of it in + // the transaction log. + // The transaction log is made up of multiple files, called generations, and + // Elasticsearch will delete any generation files when they are no longer + // needed, freeing up disk space. + // + // It is also possible to trigger a flush on one or more indices using the flush + // API, although it is rare for users to need to call this API directly. + // If you call the flush API after indexing some documents then a successful + // response indicates that Elasticsearch has flushed all the documents that were + // indexed before the flush API was called. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush Flush indices_flush.NewFlush - // Performs the force merge operation on one or more indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html + // Force a merge. 
+ // Perform the force merge operation on the shards of one or more indices. + // For data streams, the API forces a merge on the shards of the stream's + // backing indices. + // + // Merging reduces the number of segments in each shard by merging some of them + // together and also frees up the space used by deleted documents. + // Merging normally happens automatically, but sometimes it is useful to trigger + // a merge manually. + // + // WARNING: We recommend force merging only a read-only index (meaning the index + // is no longer receiving writes). + // When documents are updated or deleted, the old version is not immediately + // removed but instead soft-deleted and marked with a "tombstone". + // These soft-deleted documents are automatically cleaned up during regular + // segment merges. + // But force merge can cause very large (greater than 5 GB) segments to be + // produced, which are not eligible for regular merges. + // So the number of soft-deleted documents can then grow rapidly, resulting in + // higher disk usage and worse search performance. + // If you regularly force merge an index receiving writes, this can also make + // snapshots more expensive, since the new documents can't be backed up + // incrementally. + // + // **Blocks during a force merge** + // + // Calls to this API block until the merge is complete (unless request contains + // `wait_for_completion=false`). + // If the client connection is lost before completion then the force merge + // process will continue in the background. + // Any new requests to force merge the same indices will also block until the + // ongoing force merge is complete. + // + // **Running force merge asynchronously** + // + // If the request contains `wait_for_completion=false`, Elasticsearch performs + // some preflight checks, launches the request, and returns a task you can use + // to get the status of the task. + // However, you can not cancel this task as the force merge task is not + // cancelable. 
+ // Elasticsearch creates a record of this task as a document at + // `_tasks/<task_id>`. + // When you are done with a task, you should delete the task document so + // Elasticsearch can reclaim the space. + // + // **Force merging multiple indices** + // + // You can force merge multiple indices with a single request by targeting: + // + // * One or more data streams that contain multiple backing indices + // * Multiple indices + // * One or more aliases + // * All data streams and indices in a cluster + // + // Each targeted shard is force-merged separately using the force_merge + // threadpool. + // By default each node only has a single `force_merge` thread which means that + // the shards on that node are force-merged one at a time. + // If you expand the `force_merge` threadpool on a node then it will force merge + // its shards in parallel. + // + // Force merge makes the storage for the shard being merged temporarily + // increase, as it may require free space up to triple its size in case the + // `max_num_segments` parameter is set to `1`, to rewrite all segments into a + // new one. + // + // **Data streams and time-based indices** + // + // Force-merging is useful for managing a data stream's older backing indices + // and other time-based indices, particularly after a rollover. + // In these cases, each index only receives indexing traffic for a certain + // period of time. + // Once an index receives no more writes, its shards can be force-merged to a + // single segment. + // This can be a good idea because single-segment shards can sometimes use + // simpler and more efficient data structures to perform searches. + // For example: + // + // ``` + // POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge Forcemerge indices_forcemerge.NewForcemerge // Get index information. - // Returns information about one or more indices. 
For data streams, the API - // returns information about the + // Get information about one or more indices. For data streams, the API returns + // information about the // stream’s backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get Get indices_get.NewGet // Get aliases. // Retrieves information for one or more data stream or index aliases. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias GetAlias indices_get_alias.NewGetAlias // Get data stream lifecycles. - // Retrieves the data stream lifecycle configuration of one or more data - // streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle.html + // + // Get the data stream lifecycle configuration of one or more data streams. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle GetDataLifecycle indices_get_data_lifecycle.NewGetDataLifecycle + // Get data stream lifecycle stats. + // Get statistics about the data streams that are managed by a data stream + // lifecycle. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats + GetDataLifecycleStats indices_get_data_lifecycle_stats.NewGetDataLifecycleStats // Get data streams. - // Retrieves information about one or more data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // + // Get information about one or more data streams. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream GetDataStream indices_get_data_stream.NewGetDataStream // Get mapping definitions. // Retrieves mapping definitions for one or more fields. 
// For data streams, the API retrieves field mappings for the stream’s backing // indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html + // + // This API is useful if you don't need a complete mapping or if an index + // mapping contains a large number of fields. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping GetFieldMapping indices_get_field_mapping.NewGetFieldMapping // Get index templates. - // Returns information about one or more index templates. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template.html + // Get information about one or more index templates. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template GetIndexTemplate indices_get_index_template.NewGetIndexTemplate // Get mapping definitions. - // Retrieves mapping definitions for one or more indices. // For data streams, the API retrieves mappings for the stream’s backing // indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping GetMapping indices_get_mapping.NewGetMapping + // Get the migration reindexing status. + // + // Get the status of a migration reindex attempt for a data stream or index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + GetMigrateReindexStatus indices_get_migrate_reindex_status.NewGetMigrateReindexStatus // Get index settings. - // Returns setting information for one or more indices. For data streams, - // returns setting information for the stream’s backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html + // Get setting information for one or more indices. + // For data streams, it returns setting information for the stream's backing + // indices. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings GetSettings indices_get_settings.NewGetSettings // Get index templates. - // Retrieves information about one or more index templates. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template-v1.html + // Get information about one or more index templates. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template GetTemplate indices_get_template.NewGetTemplate + // Reindex legacy backing indices. + // + // Reindex all legacy backing indices for a data stream. + // This operation occurs in a persistent task. + // The persistent task ID is returned immediately and the reindexing work is + // completed in that task. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + MigrateReindex indices_migrate_reindex.NewMigrateReindex // Convert an index alias to a data stream. // Converts an index alias to a data stream. // You must have a matching index template that is data stream enabled. @@ -1819,248 +4284,952 @@ type Indices struct { // the same name. // The indices for the alias become hidden backing indices for the stream. // The write index for the alias becomes the write index for the stream. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream MigrateToDataStream indices_migrate_to_data_stream.NewMigrateToDataStream // Update data streams. // Performs one or more data stream modification actions in a single atomic // operation. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream ModifyDataStream indices_modify_data_stream.NewModifyDataStream - // Opens a closed index. + // Open a closed index. // For data streams, the API opens any closed backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html + // + // A closed index is blocked for read/write operations and does not allow all + // operations that opened indices allow. + // It is not possible to index documents or to search for documents in a closed + // index. + // This allows closed indices to not have to maintain internal data structures + // for indexing or searching documents, resulting in a smaller overhead on the + // cluster. + // + // When opening or closing an index, the master is responsible for restarting + // the index shards to reflect the new state of the index. + // The shards will then go through the normal recovery process. + // The data of opened or closed indices is automatically replicated by the + // cluster to ensure that enough shard copies are safely kept around at all + // times. + // + // You can open and close multiple indices. + // An error is thrown if the request explicitly refers to a missing index. + // This behavior can be turned off by using the `ignore_unavailable=true` + // parameter. + // + // By default, you must explicitly name the indices you are opening or closing. + // To open or close indices with `_all`, `*`, or other wildcard expressions, + // change the `action.destructive_requires_name` setting to `false`. + // This setting can also be changed with the cluster update settings API. + // + // Closed indices consume a significant amount of disk-space which can cause + // problems in managed environments. 
+ // Closing indices can be turned off with the cluster settings API by setting + // `cluster.indices.close.enable` to `false`. + // + // Because opening or closing an index allocates its shards, the + // `wait_for_active_shards` setting on index creation applies to the `_open` and + // `_close` index actions as well. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open Open indices_open.NewOpen - // Promotes a data stream from a replicated data stream managed by CCR to a - // regular data stream - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // Promote a data stream. + // Promote a data stream from a replicated data stream managed by cross-cluster + // replication (CCR) to a regular data stream. + // + // With CCR auto following, a data stream from a remote cluster can be + // replicated to the local cluster. + // These data streams can't be rolled over in the local cluster. + // These replicated data streams roll over only if the upstream data stream + // rolls over. + // In the event that the remote cluster is no longer available, the data stream + // in the local cluster can be promoted to a regular data stream, which allows + // these data streams to be rolled over in the local cluster. + // + // NOTE: When promoting a data stream, ensure the local cluster has a data + // stream enabled index template that matches the data stream. + // If this is missing, the data stream will not be able to roll over until a + // matching index template is created. + // This will affect the lifecycle management of the data stream and interfere + // with the data stream size and retention. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream PromoteDataStream indices_promote_data_stream.NewPromoteDataStream // Create or update an alias. // Adds a data stream or index to an alias. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias PutAlias indices_put_alias.NewPutAlias // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-put-lifecycle.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle PutDataLifecycle indices_put_data_lifecycle.NewPutDataLifecycle // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html + // + // Elasticsearch applies templates to new indices based on a wildcard pattern + // that matches the index name. + // Index templates are applied during data stream or index creation. + // For data streams, these settings and mappings are applied when the stream's + // backing indices are created. + // Settings and mappings specified in a create index API request override any + // settings or mappings specified in an index template. + // Changes to index templates do not affect existing indices, including the + // existing backing indices of a data stream. + // + // You can use C-style `/* *\/` block comments in index templates. + // You can include comments anywhere in the request body, except before the + // opening curly bracket. + // + // **Multiple matching templates** + // + // If multiple index templates match the name of a new index or data stream, the + // template with the highest priority is used. + // + // Multiple templates with overlapping index patterns at the same priority are + // not allowed and an error will be thrown when attempting to create a template + // matching an existing index template at identical priorities. 
+ // + // **Composing aliases, mappings, and settings** + // + // When multiple component templates are specified in the `composed_of` field + // for an index template, they are merged in the order specified, meaning that + // later component templates override earlier component templates. + // Any mappings, settings, or aliases from the parent index template are merged + // in next. + // Finally, any configuration on the index request itself is merged. + // Mapping definitions are merged recursively, which means that later mapping + // components can introduce new field mappings and update the mapping + // configuration. + // If a field mapping is already contained in an earlier component, its + // definition will be completely overwritten by the later one. + // This recursive merging strategy applies not only to field mappings, but also + // root options like `dynamic_templates` and `meta`. + // If an earlier component contains a `dynamic_templates` block, then by default + // new `dynamic_templates` entries are appended onto the end. + // If an entry already exists with the same key, then it is overwritten by the + // new definition. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template PutIndexTemplate indices_put_index_template.NewPutIndexTemplate // Update field mappings. - // Adds new fields to an existing data stream or index. - // You can also use this API to change the search settings of existing fields. + // Add new fields to an existing data stream or index. + // You can also use this API to change the search settings of existing fields + // and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html + // + // **Add multi-fields to an existing field** + // + // Multi-fields let you index the same field in different ways. 
+ // You can use this API to update the fields mapping parameter and enable + // multi-fields for an existing field. + // WARNING: If an index (or data stream) contains documents when you add a + // multi-field, those documents will not have values for the new multi-field. + // You can populate the new multi-field with the update by query API. + // + // **Change supported mapping parameters for an existing field** + // + // The documentation for each mapping parameter indicates whether you can update + // it for an existing field using this API. + // For example, you can use the update mapping API to update the `ignore_above` + // parameter. + // + // **Change the mapping of an existing field** + // + // Except for supported mapping parameters, you can't change the mapping or + // field type of an existing field. + // Changing an existing field could invalidate data that's already indexed. + // + // If you need to change the mapping of a field in a data stream's backing + // indices, refer to documentation about modifying data streams. + // If you need to change the mapping of a field in other indices, create a new + // index with the correct mapping and reindex your data into that index. + // + // **Rename a field** + // + // Renaming a field would invalidate data already indexed under the old field + // name. + // Instead, add an alias field to create an alternate field name. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping PutMapping indices_put_mapping.NewPutMapping // Update index settings. - // Changes dynamic index settings in real time. For data streams, index setting - // changes are applied to all backing indices by default. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html + // Changes dynamic index settings in real time. + // For data streams, index setting changes are applied to all backing indices by + // default. 
+ // + // To revert a setting to the default value, use a null value. + // The list of per-index settings that can be updated dynamically on live + // indices can be found in index module documentation. + // To preserve existing settings from being updated, set the `preserve_existing` + // parameter to `true`. + // + // NOTE: You can only define new analyzers on closed indices. + // To add an analyzer, you must close the index, define the analyzer, and reopen + // the index. + // You cannot close the write index of a data stream. + // To update the analyzer for a data stream's write index and future backing + // indices, update the analyzer in the index template used by the stream. + // Then roll over the data stream to apply the new analyzer to the stream's + // write index and future backing indices. + // This affects searches and any new data added to the stream after the + // rollover. + // However, it does not affect the data stream's backing indices or their + // existing data. + // To change the analyzer for existing backing indices, you must create a new + // data stream and reindex your data into it. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings PutSettings indices_put_settings.NewPutSettings // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html + // Elasticsearch applies templates to new indices based on an index pattern that + // matches the index name. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. + // + // Composable templates always take precedence over legacy templates. 
+ // If no composable template matches a new index, matching legacy templates are + // applied according to their order. + // + // Index templates are only applied during index creation. + // Changes to index templates do not affect existing indices. + // Settings and mappings specified in create index API requests override any + // settings or mappings specified in an index template. + // + // You can use C-style `/* *\/` block comments in index templates. + // You can include comments anywhere in the request body, except before the + // opening curly bracket. + // + // **Indices matching multiple templates** + // + // Multiple index templates can potentially match an index, in this case, both + // the settings and mappings are merged into the final configuration of the + // index. + // The order of the merging can be controlled using the order parameter, with + // lower order being applied first, and higher orders overriding them. + // NOTE: Multiple matching templates with the same order value will result in a + // non-deterministic merging order. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template PutTemplate indices_put_template.NewPutTemplate - // Returns information about ongoing and completed shard recoveries for one or - // more indices. - // For data streams, the API returns information for the stream’s backing + // Get index recovery information. + // Get information about ongoing and completed shard recoveries for one or more // indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html + // For data streams, the API returns information for the stream's backing + // indices. + // + // All recoveries, whether ongoing or complete, are kept in the cluster state + // and may be reported on at any time. 
+ // + // Shard recovery is the process of initializing a shard copy, such as restoring + // a primary shard from a snapshot or creating a replica shard from a primary + // shard. + // When a shard recovery completes, the recovered shard is available for search + // and indexing. + // + // Recovery automatically occurs during the following processes: + // + // * When creating an index for the first time. + // * When a node rejoins the cluster and starts up any missing primary shard + // copies using the data that it holds in its data path. + // * Creation of new replica shard copies from the primary. + // * Relocation of a shard copy to a different node in the same cluster. + // * A snapshot restore operation. + // * A clone, shrink, or split operation. + // + // You can determine the cause of a shard recovery using the recovery or cat + // recovery APIs. + // + // The index recovery API reports information about completed recoveries only + // for shard copies that currently exist in the cluster. + // It only reports the last recovery for each shard copy and does not report + // historical information about earlier recoveries, nor does it report + // information about the recoveries of shard copies that no longer exist. + // This means that if a shard copy completes a recovery and then Elasticsearch + // relocates it onto a different node then the information about the original + // recovery will not be shown in the recovery API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery Recovery indices_recovery.NewRecovery // Refresh an index. // A refresh makes recent operations performed on one or more indices available // for search. // For data streams, the API runs the refresh operation on the stream’s backing // indices. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html + // + // By default, Elasticsearch periodically refreshes indices every second, but + // only on indices that have received one search request or more in the last 30 + // seconds. + // You can change this default interval with the `index.refresh_interval` + // setting. + // + // Refresh requests are synchronous and do not return a response until the + // refresh operation completes. + // + // Refreshes are resource-intensive. + // To ensure good cluster performance, it's recommended to wait for + // Elasticsearch's periodic refresh rather than performing an explicit refresh + // when possible. + // + // If your application workflow indexes documents and then runs a search to + // retrieve the indexed document, it's recommended to use the index API's + // `refresh=wait_for` query parameter option. + // This option ensures the indexing operation waits for a periodic refresh + // before running the search. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh Refresh indices_refresh.NewRefresh - // Reloads an index's search analyzers and their resources. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html + // Reload search analyzers. + // Reload an index's search analyzers and their resources. + // For data streams, the API reloads search analyzers and resources for the + // stream's backing indices. + // + // IMPORTANT: After reloading the search analyzers you should clear the request + // cache to make sure it doesn't contain responses derived from the previous + // versions of the analyzer. + // + // You can use the reload search analyzers API to pick up changes to synonym + // files used in the `synonym_graph` or `synonym` token filter of a search + // analyzer. + // To be eligible, the token filter must have an `updateable` flag of `true` and + // only be used in search analyzers. 
+ // + // NOTE: This API does not perform a reload for each shard of an index. + // Instead, it performs a reload for each node containing index shards. + // As a result, the total shard count returned by the API can differ from the + // number of index shards. + // Because reloading affects every node with an index shard, it is important to + // update the synonym file on every data node in the cluster--including nodes + // that don't contain a shard replica--before using this API. + // This ensures the synonym file is updated everywhere in the cluster in case + // shards are relocated in the future. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers ReloadSearchAnalyzers indices_reload_search_analyzers.NewReloadSearchAnalyzers - // Resolves the specified index expressions to return information about each - // cluster, including - // the local cluster, if included. - // Multiple patterns and remote clusters are supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-cluster-api.html + // Resolve the cluster. + // + // Resolve the specified index expressions to return information about each + // cluster, including the local "querying" cluster, if included. + // If no index expression is provided, the API will return information about all + // the remote clusters that are configured on the querying cluster. + // + // This endpoint is useful before doing a cross-cluster search in order to + // determine which remote clusters should be included in a search. + // + // You use the same index expression with this endpoint as you would for + // cross-cluster search. + // Index and cluster exclusions are also supported with this endpoint. + // + // For each cluster in the index expression, information is returned about: + // + // * Whether the querying ("local") cluster is currently connected to each + // remote cluster specified in the index expression. 
Note that this endpoint + // actively attempts to contact the remote clusters, unlike the `remote/info` + // endpoint. + // * Whether each remote cluster is configured with `skip_unavailable` as `true` + // or `false`. + // * Whether there are any indices, aliases, or data streams on that cluster + // that match the index expression. + // * Whether the search is likely to have errors returned when you do the + // cross-cluster search (including any authorization errors if you do not have + // permission to query the index). + // * Cluster version information, including the Elasticsearch server version. + // + // For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns + // information about the local cluster and all remotely configured clusters that + // start with the alias `cluster*`. + // Each cluster returns information about whether it has any indices, aliases or + // data streams that match `my-index-*`. + // + // ## Note on backwards compatibility + // The ability to query without an index expression was added in version 8.18, + // so when + // querying remote clusters older than that, the local cluster will send the + // index + // expression `dummy*` to those remote clusters. Thus, if an errors occur, you + // may see a reference + // to that index expression even though you didn't request it. If it causes a + // problem, you can + // instead include an index expression like `*:*` to bypass the issue. + // + // ## Advantages of using this endpoint before a cross-cluster search + // + // You may want to exclude a cluster or index from a search when: + // + // * A remote cluster is not currently connected and is configured with + // `skip_unavailable=false`. Running a cross-cluster search under those + // conditions will cause the entire search to fail. + // * A cluster has no matching indices, aliases or data streams for the index + // expression (or your user does not have permissions to search them). 
For + // example, suppose your index expression is `logs*,remote1:logs*` and the + // remote1 cluster has no indices, aliases or data streams that match `logs*`. + // In that case, that cluster will return no results from that cluster if you + // include it in a cross-cluster search. + // * The index expression (combined with any query parameters you specify) will + // likely cause an exception to be thrown when you do the search. In these + // cases, the "error" field in the `_resolve/cluster` response will be present. + // (This is also where security/permission errors will be shown.) + // * A remote cluster is an older version that does not support the feature you + // want to use in your search. + // + // ## Test availability of remote clusters + // + // The `remote/info` endpoint is commonly used to test whether the "local" + // cluster (the cluster being queried) is connected to its remote clusters, but + // it does not necessarily reflect whether the remote cluster is available or + // not. + // The remote cluster may be available, while the local cluster is not currently + // connected to it. + // + // You can use the `_resolve/cluster` API to attempt to reconnect to remote + // clusters. + // For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. + // The `connected` field in the response will indicate whether it was + // successful. + // If a connection was (re-)established, this will also cause the `remote/info` + // endpoint to now indicate a connected status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster ResolveCluster indices_resolve_cluster.NewResolveCluster - // Resolves the specified name(s) and/or index patterns for indices, aliases, - // and data streams. + // Resolve indices. + // Resolve the names and/or index patterns for indices, aliases, and data + // streams. // Multiple patterns and remote clusters are supported. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index ResolveIndex indices_resolve_index.NewResolveIndex // Roll over to a new index. - // Creates a new index for a data stream or index alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html + // TIP: It is recommended to use the index lifecycle rollover action to automate + // rollovers. + // + // The rollover API creates a new index for a data stream or index alias. + // The API behavior depends on the rollover target. + // + // **Roll over a data stream** + // + // If you roll over a data stream, the API creates a new write index for the + // stream. + // The stream's previous write index becomes a regular backing index. + // A rollover also increments the data stream's generation. + // + // **Roll over an index alias with a write index** + // + // TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + // write index to manage time series data. + // Data streams replace this functionality, require less maintenance, and + // automatically integrate with data tiers. + // + // If an index alias points to multiple indices, one of the indices must be a + // write index. + // The rollover API creates a new write index for the alias with + // `is_write_index` set to `true`. + // The API also sets `is_write_index` to `false` for the previous write index. + // + // **Roll over an index alias with one index** + // + // If you roll over an index alias that points to only one index, the API + // creates a new index for the alias and removes the original index from the + // alias. + // + // NOTE: A rollover creates a new index and is subject to the + // `wait_for_active_shards` setting.
+ // + // **Increment index names for an alias** + // + // When you roll over an index alias, you can specify a name for the new index. + // If you don't specify a name and the current index ends with `-` and a number, + // such as `my-index-000001` or `my-index-3`, the new index name increments that + // number. + // For example, if you roll over an alias with a current index of + // `my-index-000001`, the rollover creates a new index named `my-index-000002`. + // This number is always six characters and zero-padded, regardless of the + // previous index's name. + // + // If you use an index alias for time series data, you can use date math in the + // index name to track the rollover date. + // For example, you can create an alias that points to an index named + // `<my-index-{now/d}-000001>`. + // If you create the index on May 6, 2099, the index's name is + // `my-index-2099.05.06-000001`. + // If you roll over the alias on May 7, 2099, the new index's name is + // `my-index-2099.05.07-000002`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover Rollover indices_rollover.NewRollover - // Returns low-level information about the Lucene segments in index shards. - // For data streams, the API returns information about the stream’s backing + // Get index segments. + // Get low-level information about the Lucene segments in index shards. + // For data streams, the API returns information about the stream's backing // indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments Segments indices_segments.NewSegments - // Retrieves store information about replica shards in one or more indices. - // For data streams, the API retrieves store information for the stream’s + // Get index shard stores. + // Get store information about replica shards in one or more indices.
+ // For data streams, the API retrieves store information for the stream's // backing indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html + // + // The index shard stores API returns the following information: + // + // * The node on which each replica shard exists. + // * The allocation ID for each replica shard. + // * A unique ID for each replica shard. + // * Any errors encountered while opening the shard index or from an earlier + // failure. + // + // By default, the API returns store information only for primary shards that + // are unassigned or have one or more unassigned replica shards. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores ShardStores indices_shard_stores.NewShardStores - // Shrinks an existing index into a new index with fewer primary shards. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html + // Shrink an index. + // Shrink an index into a new index with fewer primary shards. + // + // Before you can shrink an index: + // + // * The index must be read-only. + // * A copy of every shard in the index must reside on the same node. + // * The index must have a green health status. + // + // To make shard allocation easier, we recommend you also remove the index's + // replica shards. + // You can later re-add replica shards as part of the shrink operation. + // + // The requested number of primary shards in the target index must be a factor + // of the number of shards in the source index. + // For example an index with 8 primary shards can be shrunk into 4, 2 or 1 + // primary shards or an index with 15 primary shards can be shrunk into 5, 3 or + // 1. + // If the number of shards in the index is a prime number it can only be shrunk + // into a single primary shard + // Before shrinking, a (primary or replica) copy of every shard in the index + // must be present on the same node. 
+ // + // The current write index on a data stream cannot be shrunk. In order to shrink + // the current write index, the data stream must first be rolled over so that a + // new write index is created and then the previous write index can be shrunk. + // + // A shrink operation: + // + // * Creates a new target index with the same definition as the source index, + // but with a smaller number of primary shards. + // * Hard-links segments from the source index into the target index. If the + // file system does not support hard-linking, then all segments are copied into + // the new index, which is a much more time consuming process. Also if using + // multiple data paths, shards on different data paths require a full copy of + // segment files if they are not on the same disk since hardlinks do not work + // across disks. + // * Recovers the target index as though it were a closed index which had just + // been re-opened. Recovers shards to the + // `.routing.allocation.initial_recovery._id` index setting. + // + // IMPORTANT: Indices can only be shrunk if they satisfy the following + // requirements: + // + // * The target index must not exist. + // * The source index must have more primary shards than the target index. + // * The number of primary shards in the target index must be a factor of the + // number of primary shards in the source index. The source index must have more + // primary shards than the target index. + // * The index must not contain more than 2,147,483,519 documents in total + // across all shards that will be shrunk into a single shard on the target index + // as this is the maximum number of docs that can fit into a single shard. + // * The node handling the shrink process must have sufficient free disk space + // to accommodate a second copy of the existing index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink Shrink indices_shrink.NewShrink // Simulate an index. 
- // Returns the index configuration that would be applied to the specified index - // from an existing index template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-index.html + // Get the index configuration that would be applied to the specified index from + // an existing index template. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template SimulateIndexTemplate indices_simulate_index_template.NewSimulateIndexTemplate // Simulate an index template. - // Returns the index configuration that would be applied by a particular index + // Get the index configuration that would be applied by a particular index // template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template SimulateTemplate indices_simulate_template.NewSimulateTemplate - // Splits an existing index into a new index with more primary shards. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html + // Split an index. + // Split an index into a new index with more primary shards. + // Before you can split an index: + // + // * The index must be read-only. + // * The cluster health status must be green. + // + // You can make an index read-only with the following request using the add + // index block API: + // + // ``` + // PUT /my_source_index/_block/write + // ``` + // + // The current write index on a data stream cannot be split. + // In order to split the current write index, the data stream must first be + // rolled over so that a new write index is created and then the previous write + // index can be split. + // + // The number of times the index can be split (and the number of shards that + // each original shard can be split into) is determined by the + // `index.number_of_routing_shards` setting.
+ // The number of routing shards specifies the hashing space that is used + // internally to distribute documents across shards with consistent hashing. + // For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x + // 2 x 3) could be split by a factor of 2 or 3. + // + // A split operation: + // + // * Creates a new target index with the same definition as the source index, + // but with a larger number of primary shards. + // * Hard-links segments from the source index into the target index. If the + // file system doesn't support hard-linking, all segments are copied into the + // new index, which is a much more time consuming process. + // * Hashes all documents again, after low level files are created, to delete + // documents that belong to a different shard. + // * Recovers the target index as though it were a closed index which had just + // been re-opened. + // + // IMPORTANT: Indices can only be split if they satisfy the following + // requirements: + // + // * The target index must not exist. + // * The source index must have fewer primary shards than the target index. + // * The number of primary shards in the target index must be a multiple of the + // number of primary shards in the source index. + // * The node handling the split process must have sufficient free disk space to + // accommodate a second copy of the existing index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split Split indices_split.NewSplit - // Returns statistics for one or more indices. - // For data streams, the API retrieves statistics for the stream’s backing + // Get index statistics. + // For data streams, the API retrieves statistics for the stream's backing // indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html + // + // By default, the returned statistics are index-level with `primaries` and + // `total` aggregations. 
+ // `primaries` are the values for only the primary shards. + // `total` are the accumulated values for both primary and replica shards. + // + // To get shard-level statistics, set the `level` parameter to `shards`. + // + // NOTE: When moving to another node, the shard-level statistics for a shard are + // cleared. + // Although the shard is no longer part of the node, that node retains any + // node-level statistics to which the shard contributed. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats Stats indices_stats.NewStats - // Unfreezes an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html - Unfreeze indices_unfreeze.NewUnfreeze // Create or update an alias. // Adds a data stream or index to an alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases UpdateAliases indices_update_aliases.NewUpdateAliases // Validate a query. // Validates a query without running it. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query ValidateQuery indices_validate_query.NewValidateQuery } type Inference struct { + // Perform chat completion inference + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference + ChatCompletionUnified inference_chat_completion_unified.NewChatCompletionUnified + // Perform completion inference on the service + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference + Completion inference_completion.NewCompletion // Delete an inference endpoint - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-inference-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete Delete inference_delete.NewDelete // Get an inference endpoint - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get Get inference_get.NewGet - // Perform inference on the service - // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html - Inference inference_inference.NewInference - // Create an inference endpoint - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html + // Create an inference endpoint. + // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. 
+ // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. + // + // IMPORTANT: The inference APIs enable you to use certain services, such as + // built-in machine learning models (ELSER, E5), models uploaded through Eland, + // Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + // Anthropic, Watsonx.ai, or Hugging Face. + // For built-in models and models uploaded through Eland, the inference APIs + // offer an alternative way to use and manage trained models. + // However, if you do not plan to use the inference APIs to use these models or + // if you want to use non-NLP models, use the machine learning trained model + // APIs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put Put inference_put.NewPut + // Create an OpenAI inference endpoint. + // + // Create an inference endpoint to perform an inference task with the `openai` + // service. + // + // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. + // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html + PutOpenai inference_put_openai.NewPutOpenai + // Create a Watsonx inference endpoint. + // + // Create an inference endpoint to perform an inference task with the + // `watsonxai` service. + // You need an IBM Cloud Databases for Elasticsearch deployment to use the + // `watsonxai` inference service. 
+ // You can provision one through the IBM catalog, the Cloud Databases CLI + // plug-in, the Cloud Databases API, or Terraform. + // + // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. + // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx + PutWatsonx inference_put_watsonx.NewPutWatsonx + // Perform reranking inference on the service + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference + Rerank inference_rerank.NewRerank + // Perform sparse embedding inference on the service + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference + SparseEmbedding inference_sparse_embedding.NewSparseEmbedding + // Perform streaming inference. + // Get real-time responses for completion tasks by delivering answers + // incrementally, reducing response times during computation. + // This API works only with the completion task type. + // + // IMPORTANT: The inference APIs enable you to use certain services, such as + // built-in machine learning models (ELSER, E5), models uploaded through Eland, + // Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + // Watsonx.ai, or Hugging Face. For built-in models and models uploaded through + // Eland, the inference APIs offer an alternative way to use and manage trained + // models.
However, if you do not plan to use the inference APIs to use these + // models or if you want to use non-NLP models, use the machine learning trained + // model APIs. + // + // This API requires the `monitor_inference` cluster privilege (the built-in + // `inference_admin` and `inference_user` roles grant this privilege). You must + // use a client that supports streaming. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference + StreamCompletion inference_stream_completion.NewStreamCompletion + // Perform text embedding inference on the service + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference + TextEmbedding inference_text_embedding.NewTextEmbedding + // Update an inference endpoint. + // + // Modify `task_settings`, secrets (within `service_settings`), or + // `num_allocations` for an inference endpoint, depending on the specific + // endpoint service and `task_type`. + // + // IMPORTANT: The inference APIs enable you to use certain services, such as + // built-in machine learning models (ELSER, E5), models uploaded through Eland, + // Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + // Watsonx.ai, or Hugging Face. + // For built-in models and models uploaded through Eland, the inference APIs + // offer an alternative way to use and manage trained models. + // However, if you do not plan to use the inference APIs to use these models or + // if you want to use non-NLP models, use the machine learning trained model + // APIs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update + Update inference_update.NewUpdate } type Ingest struct { - // Deletes a geoip database configuration. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-geoip-database-api.html + // Delete GeoIP database configurations. + // + // Delete one or more IP geolocation database configurations. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database DeleteGeoipDatabase ingest_delete_geoip_database.NewDeleteGeoipDatabase - // Deletes one or more existing ingest pipeline. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html + // Delete IP geolocation database configurations. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database + DeleteIpLocationDatabase ingest_delete_ip_location_database.NewDeleteIpLocationDatabase + // Delete pipelines. + // Delete one or more ingest pipelines. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline DeletePipeline ingest_delete_pipeline.NewDeletePipeline - // Gets download statistics for GeoIP2 databases used with the geoip processor. + // Get GeoIP statistics. + // Get download statistics for GeoIP2 databases that are used with the GeoIP + // processor. // https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html GeoIpStats ingest_geo_ip_stats.NewGeoIpStats - // Returns information about one or more geoip database configurations. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-geoip-database-api.html + // Get GeoIP database configurations. + // + // Get information about one or more IP geolocation database configurations. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database GetGeoipDatabase ingest_get_geoip_database.NewGetGeoipDatabase - // Returns information about one or more ingest pipelines. + // Get IP geolocation database configurations. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database + GetIpLocationDatabase ingest_get_ip_location_database.NewGetIpLocationDatabase + // Get pipelines. + // + // Get information about one or more ingest pipelines. 
// This API returns a local reference of the pipeline. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline GetPipeline ingest_get_pipeline.NewGetPipeline - // Extracts structured fields out of a single text field within a document. - // You choose which field to extract matched fields from, as well as the grok - // pattern you expect will match. + // Run a grok processor. + // Extract structured fields out of a single text field within a document. + // You must choose which field to extract matched fields from, as well as the + // grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. // https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html ProcessorGrok ingest_processor_grok.NewProcessorGrok - // Returns information about one or more geoip database configurations. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-geoip-database-api.html + // Create or update a GeoIP database configuration. + // + // Refer to the create or update IP geolocation database configuration API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database PutGeoipDatabase ingest_put_geoip_database.NewPutGeoipDatabase - // Creates or updates an ingest pipeline. + // Create or update an IP geolocation database configuration. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database + PutIpLocationDatabase ingest_put_ip_location_database.NewPutIpLocationDatabase + // Create or update a pipeline. // Changes made using this API take effect immediately. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html PutPipeline ingest_put_pipeline.NewPutPipeline - // Executes an ingest pipeline against a set of provided documents. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html + // Simulate a pipeline. + // + // Run an ingest pipeline against a set of provided documents. + // You can either specify an existing pipeline to use with the provided + // documents or supply a pipeline definition in the body of the request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate Simulate ingest_simulate.NewSimulate } type License struct { - // Deletes licensing information for the cluster - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html + // Delete the license. + // + // When the license expires, your subscription level reverts to Basic. + // + // If the operator privileges feature is enabled, only operator users can use + // this API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete Delete license_delete.NewDelete // Get license information. - // Returns information about your Elastic license, including its type, its - // status, when it was issued, and when it expires. - // For more information about the different types of licenses, refer to [Elastic - // Stack subscriptions](https://www.elastic.co/subscriptions). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html + // + // Get information about your Elastic license including its type, its status, + // when it was issued, and when it expires. + // + // >info + // > If the master node is generating a new cluster state, the get license API + // may return a `404 Not Found` response. + // > If you receive an unexpected 404 response after cluster startup, wait a + // short period and retry the request. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get Get license_get.NewGet - // Retrieves information about the status of the basic license. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html + // Get the basic license status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status GetBasicStatus license_get_basic_status.NewGetBasicStatus - // Retrieves information about the status of the trial license. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html + // Get the trial status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status GetTrialStatus license_get_trial_status.NewGetTrialStatus - // Updates the license for the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html + // Update the license. + // + // You can update your license at runtime without shutting down your nodes. + // License updates take effect immediately. + // If the license you are installing does not support all of the features that + // were available with your previous license, however, you are notified in the + // response. + // You must then re-submit the API request with the acknowledge parameter set to + // true. + // + // NOTE: If Elasticsearch security features are enabled and you are installing a + // gold or higher license, you must enable TLS on the transport networking layer + // before you install the license. + // If the operator privileges feature is enabled, only operator users can use + // this API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post Post license_post.NewPost - // The start basic API enables you to initiate an indefinite basic license, - // which gives access to all the basic features. 
If the basic license does not - // support all of the features that are available with your current license, - // however, you are notified in the response. You must then re-submit the API - // request with the acknowledge parameter set to true. - // To check the status of your basic license, use the following API: [Get basic - // status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html + // Start a basic license. + // + // Start an indefinite basic license, which gives access to all the basic + // features. + // + // NOTE: In order to start a basic license, you must not currently have a basic + // license. + // + // If the basic license does not support all of the features that are available + // with your current license, however, you are notified in the response. + // You must then re-submit the API request with the `acknowledge` parameter set + // to `true`. + // + // To check the status of your basic license, use the get basic license API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic PostStartBasic license_post_start_basic.NewPostStartBasic - // The start trial API enables you to start a 30-day trial, which gives access - // to all subscription features. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html + // Start a trial. + // Start a 30-day trial, which gives access to all subscription features. + // + // NOTE: You are allowed to start a trial only if your cluster has not already + // activated a trial for the current major product version. + // For example, if you have already activated a trial for v8.0, you cannot start + // a new trial until v9.0. You can, however, request an extended trial at + // https://www.elastic.co/trialextension. + // + // To check the status of your trial, use the get trial status API. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial PostStartTrial license_post_start_trial.NewPostStartTrial } type Logstash struct { - // Deletes a pipeline used for Logstash Central Management. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html + // Delete a Logstash pipeline. + // Delete a pipeline that is used for Logstash Central Management. + // If the request succeeds, you receive an empty response with an appropriate + // status code. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline DeletePipeline logstash_delete_pipeline.NewDeletePipeline - // Retrieves pipelines used for Logstash Central Management. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html + // Get Logstash pipelines. + // Get pipelines that are used for Logstash Central Management. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline GetPipeline logstash_get_pipeline.NewGetPipeline - // Creates or updates a pipeline used for Logstash Central Management. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html + // Create or update a Logstash pipeline. + // + // Create a pipeline that is used for Logstash Central Management. + // If the specified pipeline exists, it is replaced. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline PutPipeline logstash_put_pipeline.NewPutPipeline } type Migration struct { - // Retrieves information about different cluster, node, and index level settings - // that use deprecated features that will be removed or changed in the next - // major version. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html + // Get deprecation information. 
+ // Get information about different cluster, node, and index level settings that + // use deprecated features that will be removed or changed in the next major + // version. + // + // TIP: This API is designed for indirect use by the Upgrade Assistant. + // You are strongly recommended to use the Upgrade Assistant. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations Deprecations migration_deprecations.NewDeprecations - // Find out whether system features need to be upgraded or not - // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html + // Get feature migration information. + // Version upgrades sometimes require changes to how features store + // configuration information and data in system indices. + // Check which features need to be migrated and the status of any migrations + // that are in progress. + // + // TIP: This API is designed for indirect use by the Upgrade Assistant. + // You are strongly recommended to use the Upgrade Assistant. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status GetFeatureUpgradeStatus migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatus - // Begin upgrades for system features - // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html + // Start the feature migration. + // Version upgrades sometimes require changes to how features store + // configuration information and data in system indices. + // This API starts the automatic migration process. + // + // Some functionality might be temporarily unavailable during the migration + // process. + // + // TIP: The API is designed for indirect use by the Upgrade Assistant. We + // strongly recommend you use the Upgrade Assistant. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status PostFeatureUpgrade migration_post_feature_upgrade.NewPostFeatureUpgrade } type Ml struct { // Clear trained model deployment cache. + // // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. // As requests are handled by each allocated node, their responses may be cached // on that individual node. // Calling this API clears the caches without restarting the deployment. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-trained-model-deployment-cache.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache ClearTrainedModelDeploymentCache ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCache // Close anomaly detection jobs. + // // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. @@ -2077,87 +5246,99 @@ type Ml struct { // request. // When a datafeed that has a specified end date stops, it automatically closes // its associated job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job CloseJob ml_close_job.NewCloseJob // Delete a calendar. - // Removes all scheduled events from a calendar, then deletes it. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html + // + // Remove all scheduled events from a calendar, then delete it. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar DeleteCalendar ml_delete_calendar.NewDeleteCalendar // Delete events from a calendar. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event DeleteCalendarEvent ml_delete_calendar_event.NewDeleteCalendarEvent // Delete anomaly jobs from a calendar. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job DeleteCalendarJob ml_delete_calendar_job.NewDeleteCalendarJob // Delete a data frame analytics job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics DeleteDataFrameAnalytics ml_delete_data_frame_analytics.NewDeleteDataFrameAnalytics // Delete a datafeed. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed DeleteDatafeed ml_delete_datafeed.NewDeleteDatafeed // Delete expired ML data. - // Deletes all job results, model snapshots and forecast data that have exceeded + // + // Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection - // jobs by using _all, by specifying * as the , or by omitting the - // . - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html + // jobs by using `_all`, by specifying `*` as the ``, or by omitting the + // ``. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data DeleteExpiredData ml_delete_expired_data.NewDeleteExpiredData // Delete a filter. + // // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter DeleteFilter ml_delete_filter.NewDeleteFilter // Delete forecasts from a job. + // // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. The delete forecast API enables you to delete one or more // forecasts before they expire. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast DeleteForecast ml_delete_forecast.NewDeleteForecast // Delete an anomaly detection job. + // // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request // first tries to delete the datafeed. This behavior is equivalent to calling // the delete datafeed API with the same timeout and force parameters as the // delete job request. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job DeleteJob ml_delete_job.NewDeleteJob // Delete a model snapshot. + // // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. 
To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot DeleteModelSnapshot ml_delete_model_snapshot.NewDeleteModelSnapshot // Delete an unreferenced trained model. + // // The request deletes a trained inference model that is not referenced by an // ingest pipeline. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model DeleteTrainedModel ml_delete_trained_model.NewDeleteTrainedModel // Delete a trained model alias. + // // This API deletes an existing model alias that refers to a trained model. If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias DeleteTrainedModelAlias ml_delete_trained_model_alias.NewDeleteTrainedModelAlias // Estimate job model memory usage. - // Makes an estimation of the memory usage for an anomaly detection job model. - // It is based on analysis configuration details for the job and cardinality + // + // Make an estimation of the memory usage for an anomaly detection job model. + // The estimate is based on analysis configuration details for the job and + // cardinality // estimates for the fields it references. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory EstimateModelMemory ml_estimate_model_memory.NewEstimateModelMemory // Evaluate data frame analytics. + // // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. Evaluation requires both a ground truth // field and an analytics result field to be present. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame EvaluateDataFrame ml_evaluate_data_frame.NewEvaluateDataFrame // Explain data frame analytics config. + // // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. The following // explanations are provided: @@ -2166,7 +5347,7 @@ type Ml struct { // deciding the appropriate value for model_memory_limit setting later on. // If you have object fields or fields that are excluded via source filtering, // they are not included in the explanation. - // http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics ExplainDataFrameAnalytics ml_explain_data_frame_analytics.NewExplainDataFrameAnalytics // Force buffered data to be processed. // The flush jobs API is only applicable when sending data for analysis using @@ -2177,7 +5358,7 @@ type Ml struct { // to continue analyzing data. A close operation additionally prunes and // persists the model state to disk and the job must be opened again before // analyzing further data. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job FlushJob ml_flush_job.NewFlushJob // Predict future behavior of a time series. // @@ -2185,29 +5366,29 @@ type Ml struct { // error occurs if you try to create a forecast for a job that has an // `over_field_name` in its configuration. Forecasts predict future behavior // based on historical data. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast Forecast ml_forecast.NewForecast // Get anomaly detection job results for buckets. // The API presents a chronological view of the records, grouped by bucket. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets GetBuckets ml_get_buckets.NewGetBuckets // Get info about events in calendars. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events GetCalendarEvents ml_get_calendar_events.NewGetCalendarEvents // Get calendar configuration info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars GetCalendars ml_get_calendars.NewGetCalendars // Get anomaly detection job results for categories. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories GetCategories ml_get_categories.NewGetCategories // Get data frame analytics job configuration info. 
// You can get information for multiple data frame analytics jobs in a single // API request by using a comma-separated list of data frame analytics jobs or a // wildcard expression. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics GetDataFrameAnalytics ml_get_data_frame_analytics.NewGetDataFrameAnalytics // Get data frame analytics jobs usage info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats GetDataFrameAnalyticsStats ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStats // Get datafeeds usage info. // You can get statistics for multiple datafeeds in a single API request by @@ -2216,7 +5397,7 @@ type Ml struct { // ``, or by omitting the ``. If the datafeed is stopped, the // only information you receive is the `datafeed_id` and the `state`. // This API returns a maximum of 10,000 datafeeds. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats GetDatafeedStats ml_get_datafeed_stats.NewGetDatafeedStats // Get datafeeds configuration info. // You can get information for multiple datafeeds in a single API request by @@ -2224,39 +5405,39 @@ type Ml struct { // get information for all datafeeds by using `_all`, by specifying `*` as the // ``, or by omitting the ``. // This API returns a maximum of 10,000 datafeeds. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds GetDatafeeds ml_get_datafeeds.NewGetDatafeeds // Get filters. // You can get a single filter or all filters. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters GetFilters ml_get_filters.NewGetFilters // Get anomaly detection job results for influencers. // Influencers are the entities that have contributed to, or are to blame for, // the anomalies. Influencer results are available only if an // `influencer_field_name` is specified in the job configuration. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers GetInfluencers ml_get_influencers.NewGetInfluencers // Get anomaly detection jobs usage info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats GetJobStats ml_get_job_stats.NewGetJobStats // Get anomaly detection jobs configuration info. // You can get information for multiple anomaly detection jobs in a single API // request by using a group name, a comma-separated list of jobs, or a wildcard // expression. You can get information for all anomaly detection jobs by using // `_all`, by specifying `*` as the ``, or by omitting the ``. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs GetJobs ml_get_jobs.NewGetJobs // Get machine learning memory usage info. // Get information about how machine learning jobs and trained models are using // memory, // on each node, both within the JVM heap, and natively, outside of the JVM. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats GetMemoryStats ml_get_memory_stats.NewGetMemoryStats // Get anomaly detection job model snapshot upgrade usage info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats GetModelSnapshotUpgradeStats ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStats // Get model snapshots info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots GetModelSnapshots ml_get_model_snapshots.NewGetModelSnapshots // Get overall bucket results. // @@ -2277,7 +5458,7 @@ type Ml struct { // greater than its default), the `overall_score` is the maximum // `overall_score` of the overall buckets that have a span equal to the // jobs' largest bucket span. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets GetOverallBuckets ml_get_overall_buckets.NewGetOverallBuckets // Get anomaly records for an anomaly detection job. // Records contain the detailed analytical results. They describe the anomalous @@ -2290,41 +5471,42 @@ type Ml struct { // The number of record results depends on the number of anomalies found in each // bucket, which relates to the number of time series being modeled and the // number of detectors. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records GetRecords ml_get_records.NewGetRecords // Get trained model configuration info. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models GetTrainedModels ml_get_trained_models.NewGetTrainedModels // Get trained models usage info. // You can get usage information for multiple trained // models in a single API request by using a comma-separated list of model IDs // or a wildcard expression. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats GetTrainedModelsStats ml_get_trained_models_stats.NewGetTrainedModelsStats // Evaluate a trained model. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model InferTrainedModel ml_infer_trained_model.NewInferTrainedModel - // Return ML defaults and limits. - // Returns defaults and limits used by machine learning. + // Get machine learning information. + // Get defaults and limits used by machine learning. // This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be // used to find out what those defaults are. It also provides information about // the maximum size of machine learning jobs that could run in the current // cluster configuration. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info Info ml_info.NewInfo // Open anomaly detection jobs. + // // An anomaly detection job must be opened to be ready to receive and analyze // data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. // When you open an existing job, the most recent model state is automatically // loaded. The job is ready to resume its analysis from where it left off, once // new data is received. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job OpenJob ml_open_job.NewOpenJob // Add scheduled events to the calendar. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events PostCalendarEvents ml_post_calendar_events.NewPostCalendarEvents // Send data to an anomaly detection job for analysis. // @@ -2332,11 +5514,11 @@ type Ml struct { // at a time. // It is not currently possible to post data to multiple jobs using wildcards or // a comma-separated list. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data PostData ml_post_data.NewPostData // Preview features used by data frame analytics. - // Previews the extracted features used by a data frame analytics config. - // http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html + // Preview the extracted features used by a data frame analytics config. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics PreviewDataFrameAnalytics ml_preview_data_frame_analytics.NewPreviewDataFrameAnalytics // Preview a datafeed. // This API returns the first "page" of search results from a datafeed. @@ -2352,26 +5534,38 @@ type Ml struct { // datafeed. To get a preview that accurately reflects the behavior of the // datafeed, use the appropriate credentials. // You can also use secondary authorization headers to supply the credentials. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed PreviewDatafeed ml_preview_datafeed.NewPreviewDatafeed // Create a calendar. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar PutCalendar ml_put_calendar.NewPutCalendar // Add anomaly detection job to calendar. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job PutCalendarJob ml_put_calendar_job.NewPutCalendarJob // Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html + // By default, the query used in the source configuration is `{"match_all": + // {}}`. + // + // If the destination index does not exist, it is created automatically when you + // start the job. + // + // If you supply only a subset of the regression or classification parameters, + // hyperparameter optimization occurs. It determines a value for each of the + // undefined parameters. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics PutDataFrameAnalytics ml_put_data_frame_analytics.NewPutDataFrameAnalytics // Create a datafeed. // Datafeeds retrieve data from Elasticsearch for analysis by an anomaly // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). - // If you are concerned about delayed data, you can add a delay (`query_delay`) + // If you are concerned about delayed data, you can add a delay (`query_delay`) // at each interval. + // By default, the datafeed uses the following query: `{"match_all": {"boost": + // 1}}`. + // // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. If you @@ -2381,24 +5575,27 @@ type Ml struct { // create a datafeed. Do not add a datafeed // directly to the `.ml-config` index. Do not give users `write` privileges on // the `.ml-config` index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed PutDatafeed ml_put_datafeed.NewPutDatafeed // Create a filter. // A filter contains a list of strings. It can be used by one or more anomaly // detection jobs. // Specifically, filters are referenced in the `custom_rules` property of // detector configuration objects. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter PutFilter ml_put_filter.NewPutFilter // Create an anomaly detection job. + // // If you include a `datafeed_config`, you must have read index privileges on // the source index.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html + // If you include a `datafeed_config` but do not provide a query, the datafeed + // uses `{"match_all": {"boost": 1}}`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job PutJob ml_put_job.NewPutJob // Create a trained model. // Enable you to supply a trained model that is not created by data frame // analytics. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model PutTrainedModel ml_put_trained_model.NewPutTrainedModel // Create or update a trained model alias. // A trained model alias is a logical name used to reference a single trained @@ -2417,23 +5614,23 @@ type Ml struct { // If you use this API to update an alias and there are very few input fields in // common between the old and new trained models for the model alias, the API // returns a warning. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias PutTrainedModelAlias ml_put_trained_model_alias.NewPutTrainedModelAlias // Create part of a trained model definition. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part PutTrainedModelDefinitionPart ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPart // Create a trained model vocabulary. // This API is supported only for natural language processing (NLP) models. // The vocabulary is stored in the index as described in // `inference_config.*.vocabulary` of the trained model definition. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary PutTrainedModelVocabulary ml_put_trained_model_vocabulary.NewPutTrainedModelVocabulary // Reset an anomaly detection job. // All model state and results are deleted. The job is ready to start over as if // it had just been created. // It is not currently possible to reset multiple jobs using wildcards or a // comma separated list. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job ResetJob ml_reset_job.NewResetJob // Revert to a snapshot. // The machine learning features react quickly to anomalous input, learning new @@ -2443,7 +5640,7 @@ type Ml struct { // one-off, then it might be appropriate to reset the model state to a time // before this event. For example, you might consider reverting to a saved // snapshot after Black Friday or a critical system failure. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot RevertModelSnapshot ml_revert_model_snapshot.NewRevertModelSnapshot // Set upgrade_mode for ML indices. // Sets a cluster wide upgrade_mode setting that prepares machine learning @@ -2458,7 +5655,7 @@ type Ml struct { // indices, though stopping jobs is not a requirement in that case. // You can see the current value for the upgrade_mode setting by using the get // machine learning info API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode SetUpgradeMode ml_set_upgrade_mode.NewSetUpgradeMode // Start a data frame analytics job. 
// A data frame analytics job can be started and stopped multiple times @@ -2472,7 +5669,7 @@ type Ml struct { // If there are any mapping conflicts, the job fails to start. // If the destination index exists, it is used as is. You can therefore set up // the destination index in advance with custom settings and mappings. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics StartDataFrameAnalytics ml_start_data_frame_analytics.NewStartDataFrameAnalytics // Start datafeeds. // @@ -2494,28 +5691,28 @@ type Ml struct { // those same roles. If you provided secondary // authorization headers when you created or updated the datafeed, those // credentials are used instead. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed StartDatafeed ml_start_datafeed.NewStartDatafeed // Start a trained model deployment. // It allocates the model to every machine learning node. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment StartTrainedModelDeployment ml_start_trained_model_deployment.NewStartTrainedModelDeployment // Stop data frame analytics jobs. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics StopDataFrameAnalytics ml_stop_data_frame_analytics.NewStopDataFrameAnalytics // Stop datafeeds. // A datafeed that is stopped ceases to retrieve data from Elasticsearch. 
A // datafeed can be started and stopped // multiple times throughout its lifecycle. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed StopDatafeed ml_stop_datafeed.NewStopDatafeed // Stop a trained model deployment. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment StopTrainedModelDeployment ml_stop_trained_model_deployment.NewStopTrainedModelDeployment // Update a data frame analytics job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics UpdateDataFrameAnalytics ml_update_data_frame_analytics.NewUpdateDataFrameAnalytics // Update a datafeed. // You must stop and start the datafeed for the changes to be applied. @@ -2524,26 +5721,26 @@ type Ml struct { // the time of the update and runs the query using those same roles. If you // provide secondary authorization headers, // those credentials are used instead. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed UpdateDatafeed ml_update_datafeed.NewUpdateDatafeed // Update a filter. // Updates the description of a filter, adds items, or removes items from the // list. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter UpdateFilter ml_update_filter.NewUpdateFilter // Update an anomaly detection job. // Updates certain properties of an anomaly detection job. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job UpdateJob ml_update_job.NewUpdateJob // Update a snapshot. // Updates certain properties of a snapshot. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot UpdateModelSnapshot ml_update_model_snapshot.NewUpdateModelSnapshot // Update a trained model deployment. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment UpdateTrainedModelDeployment ml_update_trained_model_deployment.NewUpdateTrainedModelDeployment // Upgrade a snapshot. - // Upgrades an anomaly detection model snapshot to the latest major version. + // Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. @@ -2552,52 +5749,77 @@ type Ml struct { // Only one snapshot per anomaly detection job can be upgraded at a time and the // upgraded snapshot cannot be the current snapshot of the anomaly detection // job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot UpgradeJobSnapshot ml_upgrade_job_snapshot.NewUpgradeJobSnapshot - // Validates an anomaly detection job. + // Validate an anomaly detection job. // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html Validate ml_validate.NewValidate - // Validates an anomaly detection detector. 
- // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html + // Validate an anomaly detection detector. + // https://www.elastic.co/docs/api/doc/elasticsearch ValidateDetector ml_validate_detector.NewValidateDetector } type Monitoring struct { - // Used by the monitoring features to send monitoring data. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html + // Send monitoring data. + // This API is used by the monitoring features to send monitoring data. + // https://www.elastic.co/docs/api/doc/elasticsearch Bulk monitoring_bulk.NewBulk } type Nodes struct { - // You can use this API to clear the archived repositories metering information - // in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html + // Clear the archived repositories metering. + // Clear the archived repositories metering information in the cluster. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive ClearRepositoriesMeteringArchive nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchive - // You can use the cluster repositories metering API to retrieve repositories - // metering information in a cluster. - // This API exposes monotonically non-decreasing counters and it’s expected that - // clients would durably store the - // information needed to compute aggregations over a period of time. - // Additionally, the information exposed by this - // API is volatile, meaning that it won’t be present after node restarts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html + // Get cluster repositories metering. + // Get repositories metering information for a cluster.
+ // This API exposes monotonically non-decreasing counters and it is expected + // that clients would durably store the information needed to compute + // aggregations over a period of time. + // Additionally, the information exposed by this API is volatile, meaning that + // it will not be present after node restarts. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info GetRepositoriesMeteringInfo nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfo - // This API yields a breakdown of the hot threads on each selected node in the - // cluster. - // The output is plain text with a breakdown of each node’s top hot threads. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html + // Get the hot threads for nodes. + // Get a breakdown of the hot threads on each selected node in the cluster. + // The output is plain text with a breakdown of the top hot threads for each + // node. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads HotThreads nodes_hot_threads.NewHotThreads - // Returns cluster nodes information. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html + // Get node information. + // + // By default, the API returns all attributes and core settings for cluster + // nodes. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info Info nodes_info.NewInfo - // Reloads the keystore on nodes in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings + // Reload the keystore on nodes in the cluster. + // + // Secure settings are stored in an on-disk keystore. Certain of these settings + // are reloadable. + // That is, you can change them on disk and reload them without restarting any + // nodes in the cluster. 
+ // When you have updated reloadable secure settings in your keystore, you can + // use this API to reload those settings on each node. + // + // When the Elasticsearch keystore is password protected and not simply + // obfuscated, you must provide the password for the keystore when you reload + // the secure settings. + // Reloading the settings for the whole cluster assumes that the keystores for + // all nodes are protected with the same password; this method is allowed only + // when inter-node communications are encrypted. + // Alternatively, you can reload the secure settings on each node by locally + // accessing the API and passing the node-specific Elasticsearch keystore + // password. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings ReloadSecureSettings nodes_reload_secure_settings.NewReloadSecureSettings - // Returns cluster nodes statistics. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html + // Get node statistics. + // Get statistics for nodes in a cluster. + // By default, all stats are returned. You can limit the returned information by + // using metrics. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats Stats nodes_stats.NewStats - // Returns information on the usage of features. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html + // Get feature usage information. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage Usage nodes_usage.NewUsage } @@ -2618,108 +5840,290 @@ type Profiling struct { } type QueryRules struct { - // Deletes a query rule within a query ruleset. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-rule.html + // Delete a query rule. + // Delete a query rule within a query ruleset. 
+ // This is a destructive action that is only recoverable by re-adding the same + // rule with the create or update query rule API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule DeleteRule query_rules_delete_rule.NewDeleteRule - // Deletes a query ruleset. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-ruleset.html + // Delete a query ruleset. + // Remove a query ruleset and its associated data. + // This is a destructive action that is not recoverable. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset DeleteRuleset query_rules_delete_ruleset.NewDeleteRuleset - // Returns the details about a query rule within a query ruleset - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-rule.html + // Get a query rule. + // Get details about a query rule within a query ruleset. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule GetRule query_rules_get_rule.NewGetRule - // Returns the details about a query ruleset - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-ruleset.html + // Get a query ruleset. + // Get details about a query ruleset. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset GetRuleset query_rules_get_ruleset.NewGetRuleset - // Returns summarized information about existing query rulesets. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-query-rulesets.html + // Get all query rulesets. + // Get summarized information about the query rulesets. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets ListRulesets query_rules_list_rulesets.NewListRulesets - // Creates or updates a query rule within a query ruleset. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-rule.html + // Create or update a query rule. + // Create or update a query rule within a query ruleset. + // + // IMPORTANT: Due to limitations within pinned queries, you can only pin + // documents using ids or docs, but cannot use both in single rule. + // It is advised to use one or the other in query rulesets, to avoid errors. + // Additionally, pinned queries have a maximum limit of 100 pinned hits. + // If multiple matching rules pin more than 100 documents, only the first 100 + // documents are pinned in the order they are specified in the ruleset. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule PutRule query_rules_put_rule.NewPutRule - // Creates or updates a query ruleset. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-ruleset.html + // Create or update a query ruleset. + // There is a limit of 100 rules per ruleset. + // This limit can be increased by using the + // `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + // + // IMPORTANT: Due to limitations within pinned queries, you can only select + // documents using `ids` or `docs`, but cannot use both in single rule. + // It is advised to use one or the other in query rulesets, to avoid errors. + // Additionally, pinned queries have a maximum limit of 100 pinned hits. + // If multiple matching rules pin more than 100 documents, only the first 100 + // documents are pinned in the order they are specified in the ruleset. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset PutRuleset query_rules_put_ruleset.NewPutRuleset - // Creates or updates a query ruleset. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/test-query-ruleset.html + // Test a query ruleset. 
+ // Evaluate match criteria against a query ruleset to identify the rules that + // would match that criteria. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test Test query_rules_test.NewTest } type Rollup struct { - // Deletes an existing rollup job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html + // Delete a rollup job. + // + // A job must be stopped before it can be deleted. + // If you attempt to delete a started job, an error occurs. + // Similarly, if you attempt to delete a nonexistent job, an exception occurs. + // + // IMPORTANT: When you delete a job, you remove only the process that is + // actively monitoring and rolling up data. + // The API does not delete any previously rolled up data. + // This is by design; a user may wish to roll up a static data set. + // Because the data set is static, after it has been fully rolled up there is no + // need to keep the indexing rollup job around (as there will be no new data). + // Thus the job can be deleted, leaving behind the rolled up data for analysis. + // If you wish to also remove the rollup data and the rollup index contains the + // data for only a single job, you can delete the whole rollup index. + // If the rollup index stores data from several jobs, you must issue a + // delete-by-query that targets the rollup job's identifier in the rollup index. + // For example: + // + // ``` + // POST my_rollup_index/_delete_by_query + // { + // "query": { + // "term": { + // "_rollup.id": "the_rollup_job_id" + // } + // } + // } + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job DeleteJob rollup_delete_job.NewDeleteJob - // Retrieves the configuration, stats, and status of rollup jobs. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html + // Get rollup job information. + // Get the configuration, stats, and status of rollup jobs. 
+ // + // NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + // If a job was created, ran for a while, then was deleted, the API does not + // return any details about it. + // For details about a historical rollup job, the rollup capabilities API may be + // more useful. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs GetJobs rollup_get_jobs.NewGetJobs - // Returns the capabilities of any rollup jobs that have been configured for a + // Get the rollup job capabilities. + // Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html + // + // This API is useful because a rollup job is often configured to rollup only a + // subset of fields from the source index. + // Furthermore, only certain aggregations can be configured for various fields, + // leading to a limited subset of functionality depending on that configuration. + // This API enables you to inspect an index and determine: + // + // 1. Does this index have associated rollup data somewhere in the cluster? + // 2. If yes to the first question, what fields were rolled up, what + // aggregations can be performed, and where does the data live? + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps GetRollupCaps rollup_get_rollup_caps.NewGetRollupCaps - // Returns the rollup capabilities of all jobs inside of a rollup index (for - // example, the index where rollup data is stored). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html + // Get the rollup index capabilities. + // Get the rollup capabilities of all jobs inside of a rollup index. + // A single rollup index may store the data for multiple rollup jobs and may + // have a variety of capabilities depending on those jobs. 
This API enables you + // to determine: + // + // * What jobs are stored in an index (or indices specified via a pattern)? + // * What target indices were rolled up, what fields were used in those rollups, + // and what aggregations can be performed on each job? + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps GetRollupIndexCaps rollup_get_rollup_index_caps.NewGetRollupIndexCaps - // Creates a rollup job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html + // Create a rollup job. + // + // WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + // fail with a message about the deprecation and planned removal of rollup + // features. A cluster needs to contain either a rollup job or a rollup index in + // order for this API to be allowed to run. + // + // The rollup job configuration contains all the details about how the job + // should run, when it indexes documents, and what future queries will be able + // to run against the rollup index. + // + // There are three main sections to the job configuration: the logistical + // details about the job (for example, the cron schedule), the fields that are + // used for grouping, and what metrics to collect for each group. + // + // Jobs are created in a `STOPPED` state. You can start them with the start + // rollup jobs API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job PutJob rollup_put_job.NewPutJob - // Enables searching rolled-up data using the standard Query DSL. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html + // Search rolled-up data. + // The rollup search endpoint is needed because, internally, rolled-up documents + // utilize a different document structure than the original data. 
+ // It rewrites standard Query DSL into a format that matches the rollup + // documents then takes the response and rewrites it back to what a client would + // expect given the original query. + // + // The request body supports a subset of features from the regular search API. + // The following functionality is not available: + // + // `size`: Because rollups work on pre-aggregated data, no search hits can be + // returned and so size must be set to zero or omitted entirely. + // `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are + // similarly disallowed. + // + // **Searching both historical rollup and non-rollup data** + // + // The rollup search API has the capability to search across both "live" + // non-rollup data and the aggregated rollup data. + // This is done by simply adding the live indices to the URI. For example: + // + // ``` + // GET sensor-1,sensor_rollup/_rollup_search + // { + // "size": 0, + // "aggregations": { + // "max_temperature": { + // "max": { + // "field": "temperature" + // } + // } + // } + // } + // ``` + // + // The rollup search endpoint does two things when the search runs: + // + // * The original request is sent to the non-rollup index unaltered. + // * A rewritten version of the original request is sent to the rollup index. + // + // When the two responses are received, the endpoint rewrites the rollup + // response and merges the two together. + // During the merging process, if there is any overlap in buckets between the + // two responses, the buckets from the non-rollup index are used. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search RollupSearch rollup_rollup_search.NewRollupSearch - // Starts an existing, stopped rollup job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html + // Start rollup jobs. + // If you try to start a job that does not exist, an exception occurs. 
+ // If you try to start a job that is already started, nothing happens. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job StartJob rollup_start_job.NewStartJob - // Stops an existing, started rollup job. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html + // Stop rollup jobs. + // If you try to stop a job that does not exist, an exception occurs. + // If you try to stop a job that is already stopped, nothing happens. + // + // Since only a stopped job can be deleted, it can be useful to block the API + // until the indexer has fully stopped. + // This is accomplished with the `wait_for_completion` query parameter, and + // optionally a timeout. For example: + // + // ``` + // POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s + // ``` + // The parameter blocks the API call from returning until either the job has + // moved to STOPPED or the specified time has elapsed. + // If the specified time elapses without the job moving to STOPPED, a timeout + // exception occurs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job StopJob rollup_stop_job.NewStopJob } type SearchApplication struct { // Delete a search application. + // // Remove a search application and its associated alias. Indices attached to the // search application are not removed. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-search-application.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete Delete search_application_delete.NewDelete // Delete a behavioral analytics collection. // The associated data stream is also deleted. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-analytics-collection.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics DeleteBehavioralAnalytics search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalytics // Get search application details. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-search-application.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get Get search_application_get.NewGet // Get behavioral analytics collections. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics GetBehavioralAnalytics search_application_get_behavioral_analytics.NewGetBehavioralAnalytics - // Returns the existing search applications. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html + // Get search applications. + // Get information about search applications. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list List search_application_list.NewList + // Create a behavioral analytics collection event. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event + PostBehavioralAnalyticsEvent search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEvent // Create or update a search application. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put Put search_application_put.NewPut // Create a behavioral analytics collection.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-analytics-collection.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics PutBehavioralAnalytics search_application_put_behavioral_analytics.NewPutBehavioralAnalytics + // Render a search application query. + // Generate an Elasticsearch query using the specified query parameters and the + // search template associated with the search application or a default template + // if none is specified. + // If a parameter used in the search template is not specified in `params`, the + // parameter's default value will be used. + // The API returns the specific Elasticsearch query that would be generated and + // run by calling the search application search API. + // + // You must have `read` privileges on the backing alias of the search + // application. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query + RenderQuery search_application_render_query.NewRenderQuery // Run a search application search. // Generate and run an Elasticsearch query that uses the specified query // parameteter and the search template associated with the search application or // default template. // Unspecified template parameters are assigned their default values if // applicable. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search Search search_application_search.NewSearch } type SearchableSnapshots struct { - // Retrieve node-level cache statistics about searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Get cache statistics. + // Get statistics about the shared cache for partially mounted indices. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats CacheStats searchable_snapshots_cache_stats.NewCacheStats - // Clear the cache of searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Clear the cache. + // Clear indices and data streams from the shared cache for partially mounted + // indices. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache ClearCache searchable_snapshots_clear_cache.NewClearCache - // Mount a snapshot as a searchable index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html + // Mount a snapshot. + // Mount a snapshot as a searchable snapshot index. + // Do not use this API for snapshots managed by index lifecycle management + // (ILM). + // Manually mounting ILM-managed snapshots can interfere with ILM processes. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount Mount searchable_snapshots_mount.NewMount - // Retrieve shard-level statistics about searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Get searchable snapshot statistics. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats Stats searchable_snapshots_stats.NewStats } @@ -2727,7 +6131,28 @@ type Security struct { // Activate a user profile. // // Create or update a user profile on behalf of another user. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. 
+ // The calling application must have either an `access_token` or a combination + // of `username` and `password` for the user that the profile document is + // intended for. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // This API creates or updates a profile document for end users with information + // that is extracted from the user's authentication object including `username`, + // `full_name`, `roles`, and the authentication realm. + // For example, in the JWT `access_token` case, the profile user's `username` is + // extracted from the JWT token claim pointed to by the `claims.principal` + // setting of the JWT realm that authenticated the token. + // + // When updating a profile document, the API enables the document if it was + // disabled. + // Any updates do not change existing content for either the `labels` or `data` + // fields. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile ActivateUserProfile security_activate_user_profile.NewActivateUserProfile // Authenticate a user. // @@ -2739,7 +6164,7 @@ type Security struct { // metadata, and information about the realms that authenticated and authorized // the user. // If the user cannot be authenticated, this API returns a 401 status code. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate Authenticate security_authenticate.NewAuthenticate // Bulk delete roles. // @@ -2747,7 +6172,7 @@ type Security struct { // rather than using file-based role management. // The bulk delete roles API cannot delete roles that are defined in roles // files. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-delete-role.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role BulkDeleteRole security_bulk_delete_role.NewBulkDeleteRole // Bulk create or update roles. // @@ -2755,56 +6180,109 @@ type Security struct { // rather than using file-based role management. // The bulk create or update roles API cannot update roles that are defined in // roles files. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-put-role.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role BulkPutRole security_bulk_put_role.NewBulkPutRole - // Updates the attributes of multiple existing API keys. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html + // Bulk update API keys. + // Update the attributes for multiple API keys. + // + // IMPORTANT: It is not possible to use an API key as the authentication + // credential for this API. To update API keys, the owner user's credentials are + // required. + // + // This API is similar to the update API key API but enables you to apply the + // same update to multiple API keys in one API call. This operation can greatly + // improve performance over making individual updates. + // + // It is not possible to update expired or invalidated API keys. + // + // This API supports updates to API key access scope, metadata and expiration. + // The access scope of each API key is derived from the `role_descriptors` you + // specify in the request and a snapshot of the owner user's permissions at the + // time of the request. + // The snapshot of the owner's permissions is updated automatically on every + // call. + // + // IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + // this API might still change an API key's access scope. 
This change can occur + // if the owner user's permissions have changed since the API key was created or + // last modified. + // + // A successful request returns a JSON structure that contains the IDs of all + // updated API keys, the IDs of API keys that already had the requested changes + // and did not require an update, and error details for any failed update. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys BulkUpdateApiKeys security_bulk_update_api_keys.NewBulkUpdateApiKeys // Change passwords. // // Change the passwords of users in the native realm and built-in users. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password ChangePassword security_change_password.NewChangePassword // Clear the API key cache. // // Evict a subset of all entries from the API key cache. // The cache is also automatically cleared on state changes of the security // index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache ClearApiKeyCache security_clear_api_key_cache.NewClearApiKeyCache // Clear the privileges cache. // // Evict privileges from the native application privilege cache. // The cache is also automatically cleared for applications that have their // privileges updated. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges ClearCachedPrivileges security_clear_cached_privileges.NewClearCachedPrivileges // Clear the user cache. // - // Evict users from the user cache. You can completely clear the cache or evict - // specific users. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html + // Evict users from the user cache. + // You can completely clear the cache or evict specific users. + // + // User credentials are cached in memory on each node to avoid connecting to a + // remote authentication service or hitting the disk for every incoming request. + // There are realm settings that you can use to configure the user cache. + // For more information, refer to the documentation about controlling the user + // cache. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms ClearCachedRealms security_clear_cached_realms.NewClearCachedRealms // Clear the roles cache. // // Evict roles from the native role cache. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles ClearCachedRoles security_clear_cached_roles.NewClearCachedRoles // Clear service account token caches. // // Evict a subset of all entries from the service account token caches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html + // Two separate caches exist for service account tokens: one cache for tokens + // backed by the `service_tokens` file, and another for tokens backed by the + // `.security` index. + // This API clears matching entries from both caches. + // + // The cache for service account tokens backed by the `.security` index is + // cleared automatically on state changes of the security index. + // The cache for tokens backed by the `service_tokens` file is cleared + // automatically on file changes. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens ClearCachedServiceTokens security_clear_cached_service_tokens.NewClearCachedServiceTokens // Create an API key. // // Create an API key for access without requiring basic authentication. + // + // IMPORTANT: If the credential that is used to authenticate this request is an + // API key, the derived API key cannot have any privileges. + // If you specify privileges, the API returns an error. + // // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. // If applicable, it also returns expiration information for the API key in // milliseconds. + // // NOTE: By default, API keys never expire. You can specify expiration // information when you create the API keys. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html + // + // The API keys are created by the Elasticsearch API key service, which is + // automatically enabled. + // To configure or turn off the API key service, refer to API key service + // setting documentation. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key CreateApiKey security_create_api_key.NewCreateApiKey // Create a cross-cluster API key. // @@ -2835,66 +6313,144 @@ type Security struct { // key API. // Attempting to update them with the update REST API key API or the bulk update // REST API keys API will result in an error. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key CreateCrossClusterApiKey security_create_cross_cluster_api_key.NewCreateCrossClusterApiKey // Create a service account token. // // Create a service accounts token for access without requiring basic // authentication. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html + // + // NOTE: Service account tokens never expire. + // You must actively delete them if they are no longer needed. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token CreateServiceToken security_create_service_token.NewCreateServiceToken + // Delegate PKI authentication. + // + // This API implements the exchange of an X509Certificate chain for an + // Elasticsearch access token. + // The certificate chain is validated, according to RFC 5280, by sequentially + // considering the trust configuration of every installed PKI realm that has + // `delegation.enabled` set to `true`. + // A successfully trusted client certificate is also subject to the validation + // of the subject distinguished name according to the `username_pattern` of the + // respective realm. + // + // This API is called by smart and trusted proxies, such as Kibana, which + // terminate the user's TLS session but still want to authenticate the user by + // using a PKI realm—as if the user connected directly to Elasticsearch. + // + // IMPORTANT: The association between the subject public key in the target + // certificate and the corresponding private key is not validated. + // This is part of the TLS authentication process and it is delegated to the + // proxy that calls this API. + // The proxy is trusted to have performed the TLS authentication and this API + // translates that authentication into an Elasticsearch access token. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki + DelegatePki security_delegate_pki.NewDelegatePki // Delete application privileges. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html + // + // To use this API, you must have one of the following privileges: + // + // * The `manage_security` cluster privilege (or a greater privilege such as + // `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges DeletePrivileges security_delete_privileges.NewDeletePrivileges // Delete roles. // // Delete roles in the native realm. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html + // The role management APIs are generally the preferred way to manage roles, + // rather than using file-based role management. + // The delete roles API cannot remove roles that are defined in roles files. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role DeleteRole security_delete_role.NewDeleteRole // Delete role mappings. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html + // + // Role mappings define which roles are assigned to each user. + // The role mapping APIs are generally the preferred way to manage role mappings + // rather than using role mapping files. + // The delete role mappings API cannot remove role mappings that are defined in + // role mapping files. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping DeleteRoleMapping security_delete_role_mapping.NewDeleteRoleMapping // Delete service account tokens. // // Delete service account tokens for a service in a specified namespace. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token DeleteServiceToken security_delete_service_token.NewDeleteServiceToken // Delete users. // // Delete users from the native realm. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user DeleteUser security_delete_user.NewDeleteUser // Disable users. // // Disable users in the native realm. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html + // By default, when you create users, they are enabled. + // You can use this API to revoke a user's access to Elasticsearch. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user DisableUser security_disable_user.NewDisableUser // Disable a user profile. // // Disable user profiles so that they are not visible in user profile searches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // When you activate a user profile, it's automatically enabled and visible in + // user profile searches. You can use the disable user profile API to disable a + // user profile so it’s not visible in these searches. + // To re-enable a disabled user profile, use the enable user profile API. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile DisableUserProfile security_disable_user_profile.NewDisableUserProfile // Enable users. // // Enable users in the native realm. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html + // By default, when you create users, they are enabled. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user EnableUser security_enable_user.NewEnableUser // Enable a user profile. // // Enable user profiles to make them visible in user profile searches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // When you activate a user profile, it's automatically enabled and visible in + // user profile searches. + // If you later disable the user profile, you can use the enable user profile + // API to make the profile visible in these searches again. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile EnableUserProfile security_enable_user_profile.NewEnableUserProfile // Enroll Kibana. // // Enable a Kibana instance to configure itself for communication with a secured // Elasticsearch cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html + // + // NOTE: This API is currently intended for internal use only by Kibana. + // Kibana uses this API internally to configure itself for communications with + // an Elasticsearch cluster that already has security features enabled. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana EnrollKibana security_enroll_kibana.NewEnrollKibana // Enroll a node. // // Enroll a new node to allow it to join an existing cluster with security // features enabled. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html + // + // The response contains all the necessary information for the joining node to + // bootstrap discovery and security related settings so that it can successfully + // join the cluster. + // The response contains key and certificate material that allows the caller to + // generate valid signed certificates for the HTTP layer of all nodes in the + // cluster. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node EnrollNode security_enroll_node.NewEnrollNode // Get API key information. // @@ -2904,21 +6460,31 @@ type Security struct { // If you have `read_security`, `manage_api_key` or greater privileges // (including `manage_security`), this API returns all API keys regardless of // ownership. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key GetApiKey security_get_api_key.NewGetApiKey // Get builtin privileges. // // Get the list of cluster privileges and index privileges that are available in // this version of Elasticsearch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges GetBuiltinPrivileges security_get_builtin_privileges.NewGetBuiltinPrivileges // Get application privileges. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html + // + // To use this API, you must have one of the following privileges: + // + // * The `read_security` cluster privilege (or a greater privilege such as + // `manage_security` or `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges GetPrivileges security_get_privileges.NewGetPrivileges // Get roles. // // Get roles in the native realm. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html + // The role management APIs are generally the preferred way to manage roles, + // rather than using file-based role management. + // The get roles API cannot retrieve roles that are defined in roles files. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role GetRole security_get_role.NewGetRole // Get role mappings. // @@ -2927,47 +6493,109 @@ type Security struct { // rather than using role mapping files. // The get role mappings API cannot retrieve role mappings that are defined in // role mapping files. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping GetRoleMapping security_get_role_mapping.NewGetRoleMapping // Get service accounts. // // Get a list of service accounts that match the provided path parameters. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html + // + // NOTE: Currently, only the `elastic/fleet-server` service account is + // available. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts GetServiceAccounts security_get_service_accounts.NewGetServiceAccounts // Get service account credentials. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html + // + // To use this API, you must have at least the `read_security` cluster privilege + // (or a greater privilege such as `manage_service_account` or + // `manage_security`). + // + // The response includes service account tokens that were created with the + // create service account tokens API as well as file-backed tokens from all + // nodes of the cluster. + // + // NOTE: For tokens backed by the `service_tokens` file, the API collects them + // from all nodes of the cluster. + // Tokens with the same name from different nodes are assumed to be the same + // token and are only counted once towards the total number of service tokens. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials GetServiceCredentials security_get_service_credentials.NewGetServiceCredentials - // Retrieve settings for the security system indices - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html + // Get security index settings. + // + // Get the user-configurable settings for the security internal index + // (`.security` and associated indices). + // Only a subset of the index settings — those that are user-configurable—will + // be shown. + // This includes: + // + // * `index.auto_expand_replicas` + // * `index.number_of_replicas` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings GetSettings security_get_settings.NewGetSettings // Get a token. // // Create a bearer token for access without requiring basic authentication. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html + // The tokens are created by the Elasticsearch Token Service, which is + // automatically enabled when you configure TLS on the HTTP interface. + // Alternatively, you can explicitly enable the + // `xpack.security.authc.token.enabled` setting. + // When you are running in production mode, a bootstrap check prevents you from + // enabling the token service unless you also enable TLS on the HTTP interface. + // + // The get token API takes the same parameters as a typical OAuth 2.0 token API + // except for the use of a JSON request body. + // + // A successful get token API call returns a JSON structure that contains the + // access token, the amount of time (seconds) that the token expires in, the + // type, and the scope if available. + // + // The tokens returned by the get token API have a finite period of time for + // which they are valid and after that time period, they can no longer be used. + // That time period is defined by the `xpack.security.authc.token.timeout` + // setting. + // If you want to invalidate a token immediately, you can do so by using the + // invalidate token API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token GetToken security_get_token.NewGetToken // Get users. // // Get information about users in the native realm and built-in users. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user GetUser security_get_user.NewGetUser // Get user privileges. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html + // + // Get the security privileges for the logged in user. + // All users can use this API, but only to determine their own privileges. 
+ // To check the privileges of other users, you must use the run as feature. + // To check whether a user has a specific list of privileges, use the has + // privileges API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges GetUserPrivileges security_get_user_privileges.NewGetUserPrivileges // Get a user profile. // // Get a user's profile using the unique profile ID. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile GetUserProfile security_get_user_profile.NewGetUserProfile // Grant an API key. // // Create an API key on behalf of another user. // This API is similar to the create API keys API, however it creates the API // key for a user that is different than the user that runs the API. - // The caller must have authentication credentials (either an access token, or a - // username and password) for the user on whose behalf the API key will be - // created. - // It is not possible to use this API to create an API key without that user’s + // The caller must have authentication credentials for the user on whose behalf + // the API key will be created. + // It is not possible to use this API to create an API key without that user's // credentials. + // The supported user authentication credential types are: + // + // * username and password + // * Elasticsearch access tokens + // * JWTs + // // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. 
// In this case, the API key will be created on behalf of the impersonated user. @@ -2975,6 +6603,8 @@ type Security struct { // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. + // The API keys are created by the Elasticsearch API key service, which is + // automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -2983,18 +6613,26 @@ type Security struct { // // By default, API keys never expire. You can specify expiration information // when you create the API keys. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key GrantApiKey security_grant_api_key.NewGrantApiKey // Check user privileges. // // Determine whether the specified user has a specified list of privileges. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html + // All users can use this API, but only to determine their own privileges. + // To check the privileges of other users, you must use the run as feature. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges HasPrivileges security_has_privileges.NewHasPrivileges // Check user profile privileges. // // Determine whether the users associated with the specified user profile IDs // have all the requested privileges. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. 
+ // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile HasPrivilegesUserProfile security_has_privileges_user_profile.NewHasPrivilegesUserProfile // Invalidate API keys. // @@ -3003,16 +6641,23 @@ type Security struct { // Invalidated API keys fail authentication, but they can still be viewed using // the get API key information and query API key information APIs, for at least // the configured retention period, until they are automatically deleted. - // The `manage_api_key` privilege allows deleting any API keys. - // The `manage_own_api_key` only allows deleting API keys that are owned by the - // user. + // + // To use this API, you must have at least the `manage_security`, + // `manage_api_key`, or `manage_own_api_key` cluster privileges. + // The `manage_security` privilege allows deleting any API key, including both + // REST and cross cluster API keys. + // The `manage_api_key` privilege allows deleting any REST API key, but not + // cross cluster API keys. + // The `manage_own_api_key` only allows deleting REST API keys that are owned by + // the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: + // // - Set the parameter `owner=true`. - // - Or, set both `username` and `realm_name` to match the user’s identity. + // - Or, set both `username` and `realm_name` to match the user's identity. // - Or, if the request is issued by an API key, that is to say an API key // invalidates itself, specify its ID in the `ids` field. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key InvalidateApiKey security_invalidate_api_key.NewInvalidateApiKey // Invalidate a token. 
// @@ -3026,21 +6671,86 @@ type Security struct { // They can also be used exactly once. // If you want to invalidate one or more access or refresh tokens immediately, // use this invalidate token API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html + // + // NOTE: While all parameters are optional, at least one of them is required. + // More specifically, either one of `token` or `refresh_token` parameters is + // required. + // If none of these two are specified, then `realm_name` and/or `username` need + // to be specified. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token InvalidateToken security_invalidate_token.NewInvalidateToken - // Exchanges an OpenID Connection authentication response message for an - // Elasticsearch access token and refresh token pair - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html + // Authenticate OpenID Connect. + // + // Exchange an OpenID Connect authentication response message for an + // Elasticsearch internal access token and refresh token that can be + // subsequently used for authentication. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate OidcAuthenticate security_oidc_authenticate.NewOidcAuthenticate - // Invalidates a refresh token and access token that was generated from the - // OpenID Connect Authenticate API - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html + // Logout of OpenID Connect. 
+ // + // Invalidate an access token and a refresh token that were generated as a + // response to the `/_security/oidc/authenticate` API. + // + // If the OpenID Connect authentication realm in Elasticsearch is accordingly + // configured, the response to this call will contain a URI pointing to the end + // session endpoint of the OpenID Connect Provider in order to perform single + // logout. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout OidcLogout security_oidc_logout.NewOidcLogout - // Creates an OAuth 2.0 authentication request as a URL string - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html + // Prepare OpenID connect authentication. + // + // Create an oAuth 2.0 authentication request as a URL string based on the + // configuration of the OpenID Connect authentication realm in Elasticsearch. + // + // The response of this API is a URL pointing to the Authorization Endpoint of + // the configured OpenID Connect Provider, which can be used to redirect the + // browser of the user in order to continue the authentication process. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication OidcPrepareAuthentication security_oidc_prepare_authentication.NewOidcPrepareAuthentication // Create or update application privileges. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html + // + // To use this API, you must have one of the following privileges: + // + // * The `manage_security` cluster privilege (or a greater privilege such as + // `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. + // + // Application names are formed from a prefix, with an optional suffix that + // conform to the following rules: + // + // * The prefix must begin with a lowercase ASCII letter. + // * The prefix must contain only ASCII letters or digits. + // * The prefix must be at least 3 characters long. + // * If the suffix exists, it must begin with either a dash `-` or `_`. + // * The suffix cannot contain any of the following characters: `\`, `/`, `*`, + // `?`, `"`, `<`, `>`, `|`, `,`, `*`. + // * No part of the name can contain whitespace. + // + // Privilege names must begin with a lowercase ASCII letter and must contain + // only ASCII letters and digits along with the characters `_`, `-`, and `.`. + // + // Action names can contain any number of printable ASCII characters and must + // contain at least one of the following characters: `/`, `*`, `:`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges PutPrivileges security_put_privileges.NewPutPrivileges // Create or update roles. // @@ -3049,7 +6759,7 @@ type Security struct { // The create or update roles API cannot update roles that are defined in roles // files. // File-based role management is not available in Elastic Serverless. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role PutRole security_put_role.NewPutRole // Create or update role mappings. // @@ -3060,229 +6770,872 @@ type Security struct { // rather than using role mapping files. The create or update role mappings API // cannot update role mappings that are defined in role mapping files. // - // This API does not create roles. Rather, it maps users to existing roles. + // NOTE: This API does not create roles. Rather, it maps users to existing + // roles. // Roles can be created by using the create or update roles API or roles files. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html + // + // **Role templates** + // + // The most common use for role mappings is to create a mapping from a known + // value on the user to a fixed role name. + // For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should + // be given the superuser role in Elasticsearch. + // The `roles` field is used for this purpose. + // + // For more complex needs, it is possible to use Mustache templates to + // dynamically determine the names of the roles that should be granted to the + // user. + // The `role_templates` field is used for this purpose. + // + // NOTE: To use role templates successfully, the relevant scripting feature must + // be enabled. + // Otherwise, all attempts to create a role mapping with role templates fail. + // + // All of the user fields that are available in the role mapping rules are also + // available in the role templates. + // Thus it is possible to assign a user to a role that reflects their username, + // their groups, or the name of the realm to which they authenticated. + // + // By default a template is evaluated to produce a single string that is the + // name of the role which should be assigned to the user. 
+ // If the format of the template is set to "json" then the template is expected + // to produce a JSON string or an array of JSON strings for the role names. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping PutRoleMapping security_put_role_mapping.NewPutRoleMapping // Create or update users. // + // Add and update users in the native realm. // A password is required for adding a new user but is optional when updating an // existing user. - // To change a user’s password without updating any other fields, use the change + // To change a user's password without updating any other fields, use the change // password API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user PutUser security_put_user.NewPutUser // Find API keys with a query. // - // Get a paginated list of API keys and their information. You can optionally - // filter the results with a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html + // Get a paginated list of API keys and their information. + // You can optionally filter the results with a query. + // + // To use this API, you must have at least the `manage_own_api_key` or the + // `read_security` cluster privileges. + // If you have only the `manage_own_api_key` privilege, this API returns only + // the API keys that you own. + // If you have the `read_security`, `manage_api_key`, or greater privileges + // (including `manage_security`), this API returns all API keys regardless of + // ownership. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys QueryApiKeys security_query_api_keys.NewQueryApiKeys // Find roles with a query. // - // Get roles in a paginated manner. You can optionally filter the results with a - // query. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html + // Get roles in a paginated manner. + // The role management APIs are generally the preferred way to manage roles, + // rather than using file-based role management. + // The query roles API does not retrieve roles that are defined in roles files, + // nor built-in ones. + // You can optionally filter the results with a query. + // Also, the results can be paginated and sorted. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role QueryRole security_query_role.NewQueryRole // Find users with a query. // // Get information for users in a paginated manner. // You can optionally filter the results with a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html + // + // NOTE: As opposed to the get user API, built-in users are excluded from the + // result. + // This API is only for native users. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user QueryUser security_query_user.NewQueryUser // Authenticate SAML. // - // Submits a SAML response message to Elasticsearch for consumption. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html + // Submit a SAML response message to Elasticsearch for consumption. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // The SAML message that is submitted can be: + // + // * A response to a SAML authentication request that was previously created + // using the SAML prepare authentication API. + // * An unsolicited SAML message in the case of an IdP-initiated single sign-on + // (SSO) flow. 
+ // + // In either case, the SAML message needs to be a base64 encoded XML document + // with a root element of ``. + // + // After successful validation, Elasticsearch responds with an Elasticsearch + // internal access token and refresh token that can be subsequently used for + // authentication. + // This API endpoint essentially exchanges SAML responses that indicate + // successful authentication in the IdP for Elasticsearch access and refresh + // tokens, which can be used for authentication against Elasticsearch. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate SamlAuthenticate security_saml_authenticate.NewSamlAuthenticate // Logout of SAML completely. // // Verifies the logout response sent from the SAML IdP. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // The SAML IdP may send a logout response back to the SP after handling the + // SP-initiated SAML Single Logout. + // This API verifies the response by ensuring the content is relevant and + // validating its signature. + // An empty response is returned if the verification process is successful. + // The response can be sent by the IdP with either the HTTP-Redirect or the + // HTTP-Post binding. + // The caller of this API must prepare the request accordingly so that this API + // can handle either of them. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout SamlCompleteLogout security_saml_complete_logout.NewSamlCompleteLogout // Invalidate SAML. // - // Submits a SAML LogoutRequest message to Elasticsearch for consumption. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html + // Submit a SAML LogoutRequest message to Elasticsearch for consumption. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // The logout request comes from the SAML IdP during an IdP initiated Single + // Logout. + // The custom web application can use this API to have Elasticsearch process the + // `LogoutRequest`. + // After successful validation of the request, Elasticsearch invalidates the + // access token and refresh token that corresponds to that specific SAML + // principal and provides a URL that contains a SAML LogoutResponse message. + // Thus the user can be redirected back to their IdP. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate SamlInvalidate security_saml_invalidate.NewSamlInvalidate // Logout of SAML. // // Submits a request to invalidate an access token and refresh token. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // This API invalidates the tokens that were generated for a user by the SAML + // authenticate API. + // If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP + // supports this, the Elasticsearch response contains a URL to redirect the user + // to the IdP that contains a SAML logout request (starting an SP-initiated SAML + // Single Logout). 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout SamlLogout security_saml_logout.NewSamlLogout // Prepare SAML authentication. // - // Creates a SAML authentication request (``) as a URL string, - // based on the configuration of the respective SAML realm in Elasticsearch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html + // Create a SAML authentication request (``) as a URL string based + // on the configuration of the respective SAML realm in Elasticsearch. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // This API returns a URL pointing to the SAML Identity Provider. + // You can use the URL to redirect the browser of the user in order to continue + // the authentication process. + // The URL includes a single parameter named `SAMLRequest`, which contains a + // SAML Authentication request that is deflated and Base64 encoded. + // If the configuration dictates that SAML authentication requests should be + // signed, the URL has two extra parameters named `SigAlg` and `Signature`. + // These parameters contain the algorithm used for the signature and the + // signature value itself. + // It also returns a random string that uniquely identifies this SAML + // Authentication request. + // The caller of this API needs to store this identifier as it needs to be used + // in a following step of the authentication process. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication SamlPrepareAuthentication security_saml_prepare_authentication.NewSamlPrepareAuthentication // Create SAML service provider metadata. // // Generate SAML metadata for a SAML 2.0 Service Provider. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html + // + // The SAML 2.0 specification provides a mechanism for Service Providers to + // describe their capabilities and configuration using a metadata file. + // This API generates Service Provider metadata based on the configuration of a + // SAML realm in Elasticsearch. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata SamlServiceProviderMetadata security_saml_service_provider_metadata.NewSamlServiceProviderMetadata // Suggest a user profile. // // Get suggestions for user profiles that match specified search criteria. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-suggest-user-profile.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles SuggestUserProfiles security_suggest_user_profiles.NewSuggestUserProfiles // Update an API key. // - // Updates attributes of an existing API key. + // Update attributes of an existing API key. + // This API supports updates to an API key's access scope, expiration, and + // metadata. + // + // To use this API, you must have at least the `manage_own_api_key` cluster + // privilege. // Users can only update API keys that they created or that were granted to // them. - // Use this API to update API keys created by the create API Key or grant API + // To update another user’s API key, use the `run_as` feature to submit a + // request on behalf of another user. 
+ // + // IMPORTANT: It's not possible to use an API key as the authentication + // credential for this API. The owner user’s credentials are required. + // + // Use this API to update API keys created by the create API key or grant API // Key APIs. - // If you need to apply the same update to many API keys, you can use bulk - // update API Keys to reduce overhead. - // It’s not possible to update expired API keys, or API keys that have been - // invalidated by invalidate API Key. - // This API supports updates to an API key’s access scope and metadata. + // If you need to apply the same update to many API keys, you can use the bulk + // update API keys API to reduce overhead. + // It's not possible to update expired API keys or API keys that have been + // invalidated by the invalidate API key API. + // // The access scope of an API key is derived from the `role_descriptors` you - // specify in the request, and a snapshot of the owner user’s permissions at the + // specify in the request and a snapshot of the owner user's permissions at the // time of the request. - // The snapshot of the owner’s permissions is updated automatically on every + // The snapshot of the owner's permissions is updated automatically on every // call. - // If you don’t specify `role_descriptors` in the request, a call to this API - // might still change the API key’s access scope. - // This change can occur if the owner user’s permissions have changed since the + // + // IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + // this API might still change the API key's access scope. + // This change can occur if the owner user's permissions have changed since the // API key was created or last modified. - // To update another user’s API key, use the `run_as` feature to submit a - // request on behalf of another user. - // IMPORTANT: It’s not possible to use an API key as the authentication - // credential for this API. 
- // To update an API key, the owner user’s credentials are required. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key UpdateApiKey security_update_api_key.NewUpdateApiKey // Update a cross-cluster API key. // // Update the attributes of an existing cross-cluster API key, which is used for // API key based remote cluster access. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html + // + // To use this API, you must have at least the `manage_security` cluster + // privilege. + // Users can only update API keys that they created. + // To update another user's API key, use the `run_as` feature to submit a + // request on behalf of another user. + // + // IMPORTANT: It's not possible to use an API key as the authentication + // credential for this API. + // To update an API key, the owner user's credentials are required. + // + // It's not possible to update expired API keys, or API keys that have been + // invalidated by the invalidate API key API. + // + // This API supports updates to an API key's access scope, metadata, and + // expiration. + // The owner user's information, such as the `username` and `realm`, is also + // updated automatically on every call. + // + // NOTE: This API cannot update REST API keys, which should be updated by either + // the update API key or bulk update API keys API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key UpdateCrossClusterApiKey security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKey - // Update settings for the security system index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html + // Update security index settings. 
+ // + // Update the user-configurable settings for the security internal index + // (`.security` and associated indices). Only a subset of settings are allowed + // to be modified. This includes `index.auto_expand_replicas` and + // `index.number_of_replicas`. + // + // NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will + // be ignored during updates. + // + // If a specific index is not in use on the system and settings are provided for + // it, the request will be rejected. + // This API does not yet support configuring the settings for indices before + // they are in use. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings UpdateSettings security_update_settings.NewUpdateSettings // Update user profile data. // // Update specific data for the user profile that is associated with a unique // ID. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // To use this API, you must have one of the following privileges: + // + // * The `manage_user_profile` cluster privilege. + // * The `update_profile_data` global privilege for the namespaces that are + // referenced in the request. + // + // This API updates the `labels` and `data` fields of an existing user profile + // document with JSON objects. + // New keys and their values are added to the profile document and conflicting + // keys are replaced by data that's included in the request. + // + // For both labels and data, content is namespaced by the top-level fields. 
+ // The `update_profile_data` global privilege grants privileges for updating + // only the allowed namespaces. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data UpdateUserProfileData security_update_user_profile_data.NewUpdateUserProfileData } type Shutdown struct { - // Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - // and ECK. Direct use is not supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current + // Cancel node shutdown preparations. + // Remove a node from the shutdown list so it can resume normal operations. + // You must explicitly clear the shutdown request when a node rejoins the + // cluster or when a node has permanently left the cluster. + // Shutdown requests are never removed automatically by Elasticsearch. + // + // NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + // Cloud Enterprise, and Elastic Cloud on Kubernetes. + // Direct use is not supported. + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node DeleteNode shutdown_delete_node.NewDeleteNode - // Retrieve status of a node or nodes that are currently marked as shutting - // down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + // Get the shutdown status. + // + // Get information about nodes that are ready to be shut down, have shut down + // preparations still in progress, or have stalled. + // The API returns status information for each part of the shut down process. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + // supported. + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node + GetNode shutdown_get_node.NewGetNode + // Prepare a node to be shut down. + // + // NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + // Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current - GetNode shutdown_get_node.NewGetNode - // Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - // Direct use is not supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current + // + // If you specify a node that is offline, it will be prepared for shut down when + // it rejoins the cluster. + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. + // + // The API migrates ongoing tasks and index shards to other nodes as needed to + // prepare a node to be restarted or shut down and removed from the cluster. + // This ensures that Elasticsearch can be stopped safely with minimal disruption + // to the cluster. + // + // You must specify the type of shutdown: `restart`, `remove`, or `replace`. + // If a node is already being prepared for shutdown, you can use this API to + // change the shutdown type. + // + // IMPORTANT: This API does NOT terminate the Elasticsearch process. + // Monitor the node shutdown status to determine when it is safe to stop + // Elasticsearch. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node PutNode shutdown_put_node.NewPutNode } +type Simulate struct { + // Simulate data ingestion. + // Run ingest pipelines against a set of provided documents, optionally with + // substitute pipeline definitions, to simulate ingesting data into an index. + // + // This API is meant to be used for troubleshooting or pipeline development, as + // it does not actually index any data into Elasticsearch. 
+ // + // The API runs the default and final pipeline for that index against a set of + // documents provided in the body of the request. + // If a pipeline contains a reroute processor, it follows that reroute processor + // to the new index, running that index's pipelines as well the same way that a + // non-simulated ingest would. + // No data is indexed into Elasticsearch. + // Instead, the transformed document is returned, along with the list of + // pipelines that have been run and the name of the index where the document + // would have been indexed if this were not a simulation. + // The transformed document is validated against the mappings that would apply + // to this index, and any validation error is reported in the result. + // + // This API differs from the simulate pipeline API in that you specify a single + // pipeline for that API, and it runs only that one pipeline. + // The simulate pipeline API is more useful for developing a single pipeline, + // while the simulate ingest API is more useful for troubleshooting the + // interaction of the various pipelines that get applied when ingesting into an + // index. + // + // By default, the pipeline definitions that are currently in the system are + // used. + // However, you can supply substitute pipeline definitions in the body of the + // request. + // These will be used in place of the pipeline definitions that are already in + // the system. This can be used to replace existing pipeline definitions or to + // create new ones. The pipeline substitutions are used only within this + // request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest + Ingest simulate_ingest.NewIngest +} + type Slm struct { - // Deletes an existing snapshot lifecycle policy. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html + // Delete a policy. + // Delete a snapshot lifecycle policy definition. 
+ // This operation prevents any future snapshots from being taken but does not + // cancel in-progress snapshots or remove previously-taken snapshots. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle DeleteLifecycle slm_delete_lifecycle.NewDeleteLifecycle - // Immediately creates a snapshot according to the lifecycle policy, without - // waiting for the scheduled time. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html + // Run a policy. + // Immediately create a snapshot according to the snapshot lifecycle policy + // without waiting for the scheduled time. + // The snapshot policy is normally applied according to its schedule, but you + // might want to manually run a policy before performing an upgrade or other + // maintenance. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle ExecuteLifecycle slm_execute_lifecycle.NewExecuteLifecycle - // Deletes any snapshots that are expired according to the policy's retention - // rules. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html + // Run a retention policy. + // Manually apply the retention policy to force immediate removal of snapshots + // that are expired according to the snapshot lifecycle policy retention rules. + // The retention policy is normally applied according to its schedule. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention ExecuteRetention slm_execute_retention.NewExecuteRetention - // Retrieves one or more snapshot lifecycle policy definitions and information - // about the latest snapshot attempts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html + // Get policy information. + // Get snapshot lifecycle policy definitions and information about the latest + // snapshot attempts. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle GetLifecycle slm_get_lifecycle.NewGetLifecycle - // Returns global and policy-level statistics about actions taken by snapshot + // Get snapshot lifecycle management statistics. + // Get global and policy-level statistics about actions taken by snapshot // lifecycle management. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats GetStats slm_get_stats.NewGetStats - // Retrieves the status of snapshot lifecycle management (SLM). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html + // Get the snapshot lifecycle management status. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status GetStatus slm_get_status.NewGetStatus - // Creates or updates a snapshot lifecycle policy. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html + // Create or update a policy. + // Create or update a snapshot lifecycle policy. + // If the policy already exists, this request increments the policy version. + // Only the latest version of a policy is stored. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle PutLifecycle slm_put_lifecycle.NewPutLifecycle - // Turns on snapshot lifecycle management (SLM). - // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html + // Start snapshot lifecycle management. + // Snapshot lifecycle management (SLM) starts automatically when a cluster is + // formed. + // Manually starting SLM is necessary only if it has been stopped using the stop + // SLM API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start Start slm_start.NewStart - // Turns off snapshot lifecycle management (SLM). 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html + // Stop snapshot lifecycle management. + // Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. + // This API is useful when you are performing maintenance on a cluster and need + // to prevent SLM from performing any actions on your data streams or indices. + // Stopping SLM does not stop any snapshots that are in progress. + // You can manually trigger snapshots with the run snapshot lifecycle policy API + // even if SLM is stopped. + // + // The API returns a response as soon as the request is acknowledged, but the + // plugin might continue to run until in-progress operations complete and it can + // be safely stopped. + // Use the get snapshot lifecycle management status API to see if SLM is + // running. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop Stop slm_stop.NewStop } type Snapshot struct { - // Triggers the review of a snapshot repository’s contents and deletes any stale - // data not referenced by existing snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html + // Clean up the snapshot repository. + // Trigger the review of the contents of a snapshot repository and delete any + // stale data not referenced by existing snapshots. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository CleanupRepository snapshot_cleanup_repository.NewCleanupRepository - // Clones indices from one snapshot into another snapshot in the same - // repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Clone a snapshot. + // Clone part or all of a snapshot into another snapshot in the same repository. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone Clone snapshot_clone.NewClone - // Creates a snapshot in a repository. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Create a snapshot. + // Take a snapshot of a cluster or of data streams and indices. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create Create snapshot_create.NewCreate - // Creates a repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Create or update a snapshot repository. + // IMPORTANT: If you are migrating searchable snapshots, the repository name + // must be identical in the source and destination clusters. + // To register a snapshot repository, the cluster's global metadata must be + // writeable. + // Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` + // and `cluster.blocks.read_only_allow_delete` settings) that prevent write + // access. + // + // Several options for this API can be specified using a query parameter or a + // request body parameter. + // If both parameters are specified, only the query parameter is used. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository CreateRepository snapshot_create_repository.NewCreateRepository - // Deletes one or more snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Delete snapshots. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete Delete snapshot_delete.NewDelete - // Deletes a repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Delete snapshot repositories. + // When a repository is unregistered, Elasticsearch removes only the reference + // to the location where the repository is storing the snapshots. + // The snapshots themselves are left untouched and in place. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository DeleteRepository snapshot_delete_repository.NewDeleteRepository - // Returns information about a snapshot. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Get snapshot information. + // + // NOTE: The `after` parameter and `next` field enable you to iterate through + // snapshots with some consistency guarantees regarding concurrent creation or + // deletion of snapshots. + // It is guaranteed that any snapshot that exists at the beginning of the + // iteration and is not concurrently deleted will be seen during the iteration. + // Snapshots concurrently created may be seen during an iteration. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get Get snapshot_get.NewGet - // Returns information about a repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Get snapshot repository information. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository GetRepository snapshot_get_repository.NewGetRepository - // Verifies the integrity of the contents of a snapshot repository - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Analyze a snapshot repository. + // Analyze the performance characteristics and any incorrect behaviour found in + // a repository. + // + // The response exposes implementation details of the analysis which may change + // from version to version. + // The response body format is therefore not considered stable and may be + // different in newer versions. + // + // There are a large number of third-party storage systems available, not all of + // which are suitable for use as a snapshot repository by Elasticsearch. 
+ // Some storage systems behave incorrectly, or perform poorly, especially when + // accessed concurrently by multiple clients as the nodes of an Elasticsearch + // cluster do. This API performs a collection of read and write operations on + // your repository which are designed to detect incorrect behaviour and to + // measure the performance characteristics of your storage system. + // + // The default values for the parameters are deliberately low to reduce the + // impact of running an analysis inadvertently and to provide a sensible + // starting point for your investigations. + // Run your first analysis with the default parameter values to check for simple + // problems. + // If successful, run a sequence of increasingly large analyses until you + // encounter a failure or you reach a `blob_count` of at least `2000`, a + // `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, + // and a `register_operation_count` of at least `100`. + // Always specify a generous timeout, possibly `1h` or longer, to allow time for + // each analysis to run to completion. + // Perform the analyses using a multi-node cluster of a similar size to your + // production cluster so that it can detect any problems that only arise when + // the repository is accessed by many nodes at once. + // + // If the analysis fails, Elasticsearch detected that your repository behaved + // unexpectedly. + // This usually means you are using a third-party storage system with an + // incorrect or incompatible implementation of the API it claims to support. + // If so, this storage system is not suitable for use as a snapshot repository. + // You will need to work with the supplier of your storage system to address the + // incompatibilities that Elasticsearch detects. + // + // If the analysis is successful, the API returns details of the testing + // process, optionally including how long each operation took. 
+ // You can use this information to determine the performance of your storage + // system. + // If any operation fails or returns an incorrect result, the API returns an + // error. + // If the API returns an error, it may not have removed all the data it wrote to + // the repository. + // The error will indicate the location of any leftover data and this path is + // also recorded in the Elasticsearch logs. + // You should verify that this location has been cleaned up correctly. + // If there is still leftover data at the specified location, you should + // manually remove it. + // + // If the connection from your client to Elasticsearch is closed while the + // client is waiting for the result of the analysis, the test is cancelled. + // Some clients are configured to close their connection if no response is + // received within a certain timeout. + // An analysis takes a long time to complete so you might need to relax any such + // client-side timeouts. + // On cancellation the analysis attempts to clean up the data it was writing, + // but it may not be able to remove it all. + // The path to the leftover data is recorded in the Elasticsearch logs. + // You should verify that this location has been cleaned up correctly. + // If there is still leftover data at the specified location, you should + // manually remove it. + // + // If the analysis is successful then it detected no incorrect behaviour, but + // this does not mean that correct behaviour is guaranteed. + // The analysis attempts to detect common bugs but it does not offer 100% + // coverage. + // Additionally, it does not test the following: + // + // * Your repository must perform durable writes. Once a blob has been written + // it must remain in place until it is deleted, even after a power loss or + // similar disaster. + // * Your repository must not suffer from silent data corruption. 
Once a blob + // has been written, its contents must remain unchanged until it is deliberately + // modified or deleted. + // * Your repository must behave correctly even if connectivity from the cluster + // is disrupted. Reads and writes may fail in this case, but they must not + // return incorrect results. + // + // IMPORTANT: An analysis writes a substantial amount of data to your repository + // and then reads it back again. + // This consumes bandwidth on the network between the cluster and the + // repository, and storage space and I/O bandwidth on the repository itself. + // You must ensure this load does not affect other users of these systems. + // Analyses respect the repository settings `max_snapshot_bytes_per_sec` and + // `max_restore_bytes_per_sec` if available and the cluster setting + // `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth + // they consume. + // + // NOTE: This API is intended for exploratory use by humans. You should expect + // the request parameters and the response format to vary in future versions. + // + // NOTE: Different versions of Elasticsearch may perform different checks for + // repository compatibility, with newer versions typically being stricter than + // older ones. + // A storage system that passes repository analysis with one version of + // Elasticsearch may fail with a different version. + // This indicates it behaves incorrectly in ways that the former version did not + // detect. + // You must work with the supplier of your storage system to address the + // incompatibilities detected by the repository analysis API in any version of + // Elasticsearch. + // + // NOTE: This API may not work correctly in a mixed-version cluster. + // + // *Implementation details* + // + // NOTE: This section of documentation describes how the repository analysis API + // works in this version of Elasticsearch, but you should expect the + // implementation to vary between versions. 
The request parameters and response + // format depend on details of the implementation so may also be different in + // newer versions. + // + // The analysis comprises a number of blob-level tasks, as set by the + // `blob_count` parameter and a number of compare-and-exchange operations on + // linearizable registers, as set by the `register_operation_count` parameter. + // These tasks are distributed over the data and master-eligible nodes in the + // cluster for execution. + // + // For most blob-level tasks, the executing node first writes a blob to the + // repository and then instructs some of the other nodes in the cluster to + // attempt to read the data it just wrote. + // The size of the blob is chosen randomly, according to the `max_blob_size` and + // `max_total_data_size` parameters. + // If any of these reads fails then the repository does not implement the + // necessary read-after-write semantics that Elasticsearch requires. + // + // For some blob-level tasks, the executing node will instruct some of its peers + // to attempt to read the data before the writing process completes. + // These reads are permitted to fail, but must not return partial data. + // If any read returns partial data then the repository does not implement the + // necessary atomicity semantics that Elasticsearch requires. + // + // For some blob-level tasks, the executing node will overwrite the blob while + // its peers are reading it. + // In this case the data read may come from either the original or the + // overwritten blob, but the read operation must not return partial data or a + // mix of data from the two blobs. + // If any of these reads returns partial data or a mix of the two blobs then the + // repository does not implement the necessary atomicity semantics that + // Elasticsearch requires for overwrites. + // + // The executing node will use a variety of different methods to write the blob. 
+ // For instance, where applicable, it will use both single-part and multi-part + // uploads. + // Similarly, the reading nodes will use a variety of different methods to read + // the data back again. + // For instance they may read the entire blob from start to end or may read only + // a subset of the data. + // + // For some blob-level tasks, the executing node will cancel the write before it + // is complete. + // In this case, it still instructs some of the other nodes in the cluster to + // attempt to read the blob but all of these reads must fail to find the blob. + // + // Linearizable registers are special blobs that Elasticsearch manipulates using + // an atomic compare-and-exchange operation. + // This operation ensures correct and strongly-consistent behavior even when the + // blob is accessed by multiple nodes at the same time. + // The detailed implementation of the compare-and-exchange operation on + // linearizable registers varies by repository type. + // Repository analysis verifies that uncontended compare-and-exchange + // operations on a linearizable register blob always succeed. + // Repository analysis also verifies that contended operations either succeed or + // report the contention but do not return incorrect results. + // If an operation fails due to contention, Elasticsearch retries the operation + // until it succeeds. + // Most of the compare-and-exchange operations performed by repository analysis + // atomically increment a counter which is represented as an 8-byte blob. + // Some operations also verify the behavior on small blobs with sizes other than + // 8 bytes. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze + RepositoryAnalyze snapshot_repository_analyze.NewRepositoryAnalyze + // Verify the repository integrity. + // Verify the integrity of the contents of a snapshot repository. 
+ // + // This API enables you to perform a comprehensive check of the contents of a + // repository, looking for any anomalies in its data or metadata which might + // prevent you from restoring snapshots from the repository or which might cause + // future snapshot create or delete operations to fail. + // + // If you suspect the integrity of the contents of one of your snapshot + // repositories, cease all write activity to this repository immediately, set + // its `read_only` option to `true`, and use this API to verify its integrity. + // Until you do so: + // + // * It may not be possible to restore some snapshots from this repository. + // * Searchable snapshots may report errors when searched or may have unassigned + // shards. + // * Taking snapshots into this repository may fail or may appear to succeed but + // have created a snapshot which cannot be restored. + // * Deleting snapshots from this repository may fail or may appear to succeed + // but leave the underlying data on disk. + // * Continuing to write to the repository while it is in an invalid state may + // cause additional damage to its contents. + // + // If the API finds any problems with the integrity of the contents of your + // repository, Elasticsearch will not be able to repair the damage. + // The only way to bring the repository back into a fully working state after + // its contents have been damaged is by restoring its contents from a repository + // backup which was taken before the damage occurred. + // You must also identify what caused the damage and take action to prevent it + // from happening again. + // + // If you cannot restore a repository backup, register a new repository and use + // this for all future snapshot operations. 
+ // In some cases it may be possible to recover some of the contents of a damaged + // repository, either by restoring as many of its snapshots as needed and taking + // new snapshots of the restored data, or by using the reindex API to copy data + // from any searchable snapshots mounted from the damaged repository. + // + // Avoid all operations which write to the repository while the verify + // repository integrity API is running. + // If something changes the repository contents while an integrity verification + // is running then Elasticsearch may incorrectly report having detected some + // anomalies in its contents due to the concurrent writes. + // It may also incorrectly fail to report some anomalies that the concurrent + // writes prevented it from detecting. + // + // NOTE: This API is intended for exploratory use by humans. You should expect + // the request parameters and the response format to vary in future versions. + // + // NOTE: This API may not work correctly in a mixed-version cluster. + // + // The default values for the parameters of this API are designed to limit the + // impact of the integrity verification on other activities in your cluster. + // For instance, by default it will only use at most half of the `snapshot_meta` + // threads to verify the integrity of each snapshot, allowing other snapshot + // operations to use the other half of this thread pool. + // If you modify these parameters to speed up the verification process, you risk + // disrupting other snapshot-related operations in your cluster. + // For large repositories, consider setting up a separate single-node + // Elasticsearch cluster just for running the integrity verification API. + // + // The response exposes implementation details of the analysis which may change + // from version to version. + // The response body format is therefore not considered stable and may be + // different in newer versions. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity RepositoryVerifyIntegrity snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrity - // Restores a snapshot. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Restore a snapshot. + // Restore a snapshot of a cluster or data streams and indices. + // + // You can restore a snapshot only to a running cluster with an elected master + // node. + // The snapshot repository must be registered and available to the cluster. + // The snapshot and cluster versions must be compatible. + // + // To restore a snapshot, the cluster's global metadata must be writable. Ensure + // there aren't any cluster blocks that prevent writes. The restore operation + // ignores index blocks. + // + // Before you restore a data stream, ensure the cluster contains a matching + // index template with data streams enabled. To check, use the index management + // feature in Kibana or the get index template API: + // + // ``` + // GET + // _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream + // ``` + // + // If no such template exists, you can create one or restore a cluster state + // that contains one. Without a matching index template, a data stream can't + // roll over or create backing indices. + // + // If your snapshot contains data from App Search or Workplace Search, you must + // restore the Enterprise Search encryption key before you restore the snapshot. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore Restore snapshot_restore.NewRestore - // Returns information about the status of a snapshot. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Get the snapshot status. 
+ // Get a detailed description of the current state for each shard participating + // in the snapshot. + // + // Note that this API should be used only to obtain detailed shard-level + // information for ongoing snapshots. + // If this detail is not needed or you want to obtain information about one or + // more existing snapshots, use the get snapshot API. + // + // If you omit the `<snapshot>` request path parameter, the request retrieves + // information only for currently running snapshots. + // This usage is preferred. + // If needed, you can specify `<repository>` and `<snapshot>` to retrieve + // information for specific snapshots, even if they're not currently running. + // + // WARNING: Using the API to return the status of any snapshots other than + // currently running snapshots can be expensive. + // The API requires a read from the repository for each shard in each snapshot. + // For example, if you have 100 snapshots with 1,000 shards each, an API request + // that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 + // shards). + // + // Depending on the latency of your storage, such requests can take an extremely + // long time to return results. + // These requests can also tax machine resources and, when using cloud storage, + // incur high processing costs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status Status snapshot_status.NewStatus - // Verifies a repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Verify a snapshot repository. + // Check for common misconfigurations in a snapshot repository. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository VerifyRepository snapshot_verify_repository.NewVerifyRepository } type Sql struct { - // Clears the SQL cursor - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html + // Clear an SQL search cursor. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor ClearCursor sql_clear_cursor.NewClearCursor - // Deletes an async SQL search or a stored synchronous SQL search. If the search - // is still running, the API cancels it. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html + // Delete an async SQL search. + // Delete an async SQL search or a stored synchronous SQL search. + // If the search is still running, the API cancels it. + // + // If the Elasticsearch security features are enabled, only the following users + // can use this API to delete a search: + // + // * Users with the `cancel_task` cluster privilege. + // * The user who first submitted the search. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async DeleteAsync sql_delete_async.NewDeleteAsync - // Returns the current status and available results for an async SQL search or - // stored synchronous SQL search - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html + // Get async SQL search results. + // Get the current status and available results for an async SQL search or + // stored synchronous SQL search. + // + // If the Elasticsearch security features are enabled, only the user who first + // submitted the SQL search can retrieve the search using this API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async GetAsync sql_get_async.NewGetAsync - // Returns the current status of an async SQL search or a stored synchronous SQL - // search - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html + // Get the async SQL search status. + // Get the current status of an async SQL search or a stored synchronous SQL + // search. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status GetAsyncStatus sql_get_async_status.NewGetAsyncStatus - // Executes a SQL request - // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html + // Get SQL search results. + // Run an SQL request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query Query sql_query.NewQuery - // Translates SQL into Elasticsearch queries - // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html + // Translate SQL into Elasticsearch queries. + // Translate an SQL search into a search API request containing Query DSL. + // It accepts the same request body parameters as the SQL search API, excluding + // `cursor`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate Translate sql_translate.NewTranslate } @@ -3317,80 +7670,295 @@ type Ssl struct { // If Elasticsearch is configured to use a keystore or truststore, the API // output includes all certificates in that store, even though some of the // certificates might not be in active use within the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates Certificates ssl_certificates.NewCertificates } type Synonyms struct { - // Deletes a synonym set - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonyms-set.html + // Delete a synonym set. + // + // You can only delete a synonyms set that is not in use by any index analyzer. + // + // Synonyms sets can be used in synonym graph token filters and synonym token + // filters. + // These synonym filters can be used as part of search analyzers. + // + // Analyzers need to be loaded when an index is restored (such as when a node + // starts, or the index becomes open). 
+ // Even if the analyzer is not used on any field mapping, it still needs to be + // loaded on the index recovery phase. + // + // If any analyzers cannot be loaded, the index becomes unavailable and the + // cluster status becomes red or yellow as index shards are not available. + // To prevent that, synonyms sets that are used in analyzers can't be deleted. + // A delete request in this case will return a 400 response code. + // + // To remove a synonyms set, you must first remove all indices that contain + // analyzers using it. + // You can migrate an index by creating a new index that does not contain the + // token filter with the synonyms set, and use the reindex API in order to copy + // over the index data. + // Once finished, you can delete the index. + // When the synonyms set is not used in analyzers, you will be able to delete + // it. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym DeleteSynonym synonyms_delete_synonym.NewDeleteSynonym - // Deletes a synonym rule in a synonym set - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonym-rule.html + // Delete a synonym rule. + // Delete a synonym rule from a synonym set. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule DeleteSynonymRule synonyms_delete_synonym_rule.NewDeleteSynonymRule - // Retrieves a synonym set - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html + // Get a synonym set. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym GetSynonym synonyms_get_synonym.NewGetSynonym - // Retrieves a synonym rule from a synonym set - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonym-rule.html + // Get a synonym rule. + // Get a synonym rule from a synonym set. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule GetSynonymRule synonyms_get_synonym_rule.NewGetSynonymRule - // Retrieves a summary of all defined synonym sets - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-synonyms-sets.html + // Get all synonym sets. + // Get a summary of all defined synonym sets. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym GetSynonymsSets synonyms_get_synonyms_sets.NewGetSynonymsSets - // Creates or updates a synonym set. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonyms-set.html + // Create or update a synonym set. + // Synonyms sets are limited to a maximum of 10,000 synonym rules per set. + // If you need to manage more synonym rules, you can create multiple synonym + // sets. + // + // When an existing synonyms set is updated, the search analyzers that use the + // synonyms set are reloaded automatically for all indices. + // This is equivalent to invoking the reload search analyzers API for all + // indices that use the synonyms set. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym PutSynonym synonyms_put_synonym.NewPutSynonym - // Creates or updates a synonym rule in a synonym set - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonym-rule.html + // Create or update a synonym rule. + // Create or update a synonym rule in a synonym set. + // + // If any of the synonym rules included is invalid, the API returns an error. + // + // When you update a synonym rule, all analyzers using the synonyms set will be + // reloaded automatically to reflect the new rule. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule PutSynonymRule synonyms_put_synonym_rule.NewPutSynonymRule } type Tasks struct { - // Cancels a task, if it can be cancelled through an API. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html + // Cancel a task. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // A task may continue to run for some time after it has been cancelled because + // it may not be able to safely stop its current activity straight away. + // It is also possible that Elasticsearch must complete its work on other tasks + // before it can process the cancellation. + // The get task information API will continue to list these cancelled tasks + // until they complete. + // The cancelled flag in the response indicates that the cancellation command + // has been processed and the task will stop as soon as possible. + // + // To troubleshoot why a cancelled task does not complete promptly, use the get + // task information API with the `?detailed` parameter to identify the other + // tasks the system is running. + // You can also use the node hot threads API to obtain detailed information + // about the work the system is doing instead of completing the cancelled task. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks Cancel tasks_cancel.NewCancel // Get task information. - // Returns information about the tasks currently executing in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html + // Get information about a task currently running in the cluster. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // If the task identifier is not found, a 404 response code indicates that there + // are no resources that match the request. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks Get tasks_get.NewGet - // The task management API returns information about tasks currently executing - // on one or more nodes in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html + // Get all tasks. + // Get information about the tasks currently running on one or more nodes in the + // cluster. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // **Identifying running tasks** + // + // The `X-Opaque-Id header`, when provided on the HTTP request header, is going + // to be returned as a header in the response as well as in the headers field + // for in the task information. + // This enables you to track certain calls or associate certain tasks with the + // client that started them. + // For example: + // + // ``` + // curl -i -H "X-Opaque-Id: 123456" + // "http://localhost:9200/_tasks?group_by=parents" + // ``` + // + // The API returns the following result: + // + // ``` + // HTTP/1.1 200 OK + // X-Opaque-Id: 123456 + // content-type: application/json; charset=UTF-8 + // content-length: 831 + // + // { + // "tasks" : { + // "u5lcZHqcQhu-rUoFaqDphA:45" : { + // "node" : "u5lcZHqcQhu-rUoFaqDphA", + // "id" : 45, + // "type" : "transport", + // "action" : "cluster:monitor/tasks/lists", + // "start_time_in_millis" : 1513823752749, + // "running_time_in_nanos" : 293139, + // "cancellable" : false, + // "headers" : { + // "X-Opaque-Id" : "123456" + // }, + // "children" : [ + // { + // "node" : "u5lcZHqcQhu-rUoFaqDphA", + // "id" : 46, + // "type" : "direct", + // "action" : "cluster:monitor/tasks/lists[n]", + // "start_time_in_millis" : 1513823752750, + // "running_time_in_nanos" : 92133, + // "cancellable" : false, + // "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", + // "headers" : { + // "X-Opaque-Id" : "123456" + // } + 
// } + // ] + // } + // } + // } + // ``` + // In this example, `X-Opaque-Id: 123456` is the ID as a part of the response + // header. + // The `X-Opaque-Id` in the task `headers` is the ID for the task that was + // initiated by the REST request. + // The `X-Opaque-Id` in the children `headers` is the child task of the task + // that was initiated by the REST request. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks List tasks_list.NewList } type TextStructure struct { - // Finds the structure of a text field in an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html + // Find the structure of a text field. + // Find the structure of a text field in an Elasticsearch index. + // + // This API provides a starting point for extracting further information from + // log messages already ingested into Elasticsearch. + // For example, if you have ingested data into a very simple index that has just + // `@timestamp` and message fields, you can use this API to see what common + // structure exists in the message field. + // + // The response from the API contains: + // + // * Sample messages. + // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // * Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. + // + // If the structure finder produces unexpected results, specify the `explain` + // query parameter and an explanation will appear in the response. 
+ // It helps determine why the returned structure was chosen. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure FindFieldStructure text_structure_find_field_structure.NewFindFieldStructure - // Finds the structure of a list of messages. The messages must contain data - // that is suitable to be ingested into Elasticsearch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html + // Find the structure of text messages. + // Find the structure of a list of text messages. + // The messages must contain data that is suitable to be ingested into + // Elasticsearch. + // + // This API provides a starting point for ingesting data into Elasticsearch in a + // format that is suitable for subsequent use with other Elastic Stack + // functionality. + // Use this API rather than the find text structure API if your input text has + // already been split up into separate messages by some other process. + // + // The response from the API contains: + // + // * Sample messages. + // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. + // + // If the structure finder produces unexpected results, specify the `explain` + // query parameter and an explanation will appear in the response. + // It helps determine why the returned structure was chosen. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure FindMessageStructure text_structure_find_message_structure.NewFindMessageStructure - // Finds the structure of a text file. The text file must contain data that is - // suitable to be ingested into Elasticsearch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html + // Find the structure of a text file. + // The text file must contain data that is suitable to be ingested into + // Elasticsearch. + // + // This API provides a starting point for ingesting data into Elasticsearch in a + // format that is suitable for subsequent use with other Elastic Stack + // functionality. + // Unlike other Elasticsearch endpoints, the data that is posted to this + // endpoint does not need to be UTF-8 encoded and in JSON format. + // It must, however, be text; binary text formats are not currently supported. + // The size is limited to the Elasticsearch HTTP receive buffer size, which + // defaults to 100 Mb. + // + // The response from the API contains: + // + // * A couple of messages from the beginning of the text. + // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // * Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure FindStructure text_structure_find_structure.NewFindStructure - // Tests a Grok pattern on some text. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/test-grok-pattern.html + // Test a Grok pattern. + // Test a Grok pattern on one or more lines of text. + // The API indicates whether the lines match the pattern together with the + // offsets and lengths of the matched substrings. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern TestGrokPattern text_structure_test_grok_pattern.NewTestGrokPattern } type Transform struct { // Delete a transform. - // Deletes a transform. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform DeleteTransform transform_delete_transform.NewDeleteTransform // Retrieves transform usage information for transform nodes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html GetNodeStats transform_get_node_stats.NewGetNodeStats // Get transforms. - // Retrieves configuration information for transforms. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html + // Get configuration information for transforms. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform GetTransform transform_get_transform.NewGetTransform // Get transform stats. - // Retrieves usage information for transforms. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html + // + // Get usage information for transforms. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats GetTransformStats transform_get_transform_stats.NewGetTransformStats // Preview a transform. // Generates a preview of the results that you will get when you create a @@ -3401,7 +7969,7 @@ type Transform struct { // generates a list of mappings and settings for the destination index. These // values are determined based on the field // types of the source index and the transform aggregations. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform PreviewTransform transform_preview_transform.NewPreviewTransform // Create a transform. // Creates a transform. @@ -3438,28 +8006,26 @@ type Transform struct { // not give users any privileges on `.transform-internal*` indices. If you used // transforms prior to 7.5, also do not // give users any privileges on `.data-frame-internal*` indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform PutTransform transform_put_transform.NewPutTransform // Reset a transform. - // Resets a transform. + // // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform ResetTransform transform_reset_transform.NewResetTransform // Schedule a transform to start now. - // Instantly runs a transform to process data. // - // If you _schedule_now a transform, it will process the new data instantly, - // without waiting for the configured frequency interval. 
After _schedule_now - // API is called, - // the transform will be processed again at now + frequency unless _schedule_now - // API + // Instantly run a transform to process data. + // If you run this API, the transform will process the new data instantly, + // without waiting for the configured frequency interval. After the API is + // called, + // the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform ScheduleNowTransform transform_schedule_now_transform.NewScheduleNowTransform // Start a transform. - // Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. The `number_of_shards` is @@ -3487,11 +8053,11 @@ type Transform struct { // required privileges on the source and // destination indices, the transform fails when it attempts unauthorized // operations. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform StartTransform transform_start_transform.NewStartTransform // Stop transforms. // Stops one or more transforms. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform StopTransform transform_stop_transform.NewStopTransform // Update a transform. // Updates certain properties of a transform. @@ -3505,78 +8071,177 @@ type Transform struct { // Elasticsearch security features are enabled, the transform remembers which // roles the user who updated it had at the // time of update and runs with those privileges. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform UpdateTransform transform_update_transform.NewUpdateTransform - // Upgrades all transforms. + // Upgrade all transforms. + // + // Transforms are compatible across minor versions and between supported major + // versions. + // However, over time, the format of transform configuration information may + // change. // This API identifies transforms that have a legacy configuration format and - // upgrades them to the latest version. It - // also cleans up the internal data structures that store the transform state - // and checkpoints. The upgrade does not - // affect the source and destination indices. The upgrade also does not affect - // the roles that transforms use when + // upgrades them to the latest version. + // It also cleans up the internal data structures that store the transform state + // and checkpoints. + // The upgrade does not affect the source and destination indices. + // The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source - // data and write to the destination index - // remains unchanged. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html + // data and write to the destination index remains unchanged. + // + // If a transform upgrade step fails, the upgrade stops and an error is returned + // about the underlying issue. + // Resolve the issue then re-run the process again. + // A summary is returned when the upgrade is finished. + // + // To ensure continuous transforms remain running during a major version upgrade + // of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + // transforms before upgrading the cluster. + // You may want to perform a recent cluster backup prior to the upgrade. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms UpgradeTransforms transform_upgrade_transforms.NewUpgradeTransforms } type Watcher struct { - // Acknowledges a watch, manually throttling the execution of the watch's - // actions. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html + // Acknowledge a watch. + // Acknowledging a watch enables you to manually throttle the execution of the + // watch's actions. + // + // The acknowledgement state of an action is stored in the + // `status.actions..ack.state` structure. + // + // IMPORTANT: If the specified watch is currently being executed, this API will + // return an error + // The reason for this behavior is to prevent overwriting the watch status from + // a watch execution. + // + // Acknowledging an action throttles further executions of that action until its + // `ack.state` is reset to `awaits_successful_execution`. + // This happens when the condition of the watch is not met (the condition + // evaluates to false). + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch AckWatch watcher_ack_watch.NewAckWatch - // Activates a currently inactive watch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html + // Activate a watch. + // A watch can be either active or inactive. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch ActivateWatch watcher_activate_watch.NewActivateWatch - // Deactivates a currently active watch. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html + // Deactivate a watch. + // A watch can be either active or inactive. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch DeactivateWatch watcher_deactivate_watch.NewDeactivateWatch - // Removes a watch from Watcher. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html + // Delete a watch. + // When the watch is removed, the document representing the watch in the + // `.watches` index is gone and it will never be run again. + // + // Deleting a watch does not delete any watch execution records related to this + // watch from the watch history. + // + // IMPORTANT: Deleting a watch must be done by using only this API. + // Do not delete the watch directly from the `.watches` index using the + // Elasticsearch delete document API + // When Elasticsearch security features are enabled, make sure no write + // privileges are granted to anyone for the `.watches` index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch DeleteWatch watcher_delete_watch.NewDeleteWatch + // Run a watch. // This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. + // // For testing and debugging purposes, you also have fine-grained control on how - // the watch runs. You can execute the watch without executing all of its - // actions or alternatively by simulating them. You can also force execution by - // ignoring the watch condition and control whether a watch record would be - // written to the watch history after execution. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html + // the watch runs. + // You can run the watch without running all of its actions or alternatively by + // simulating them. + // You can also force execution by ignoring the watch condition and control + // whether a watch record would be written to the watch history after it runs. + // + // You can use the run watch API to run watches that are not yet registered by + // specifying the watch definition inline. 
+ // This serves as a great tool for testing and debugging your watches prior to
+ // adding them to Watcher.
+ //
+ // When Elasticsearch security features are enabled on your cluster, watches are
+ // run with the privileges of the user that stored the watches.
+ // If your user is allowed to read index `a`, but not index `b`, then the exact
+ // same set of rules will apply during execution of a watch.
+ //
+ // When using the run watch API, the authorization data of the user that called
+ // the API will be used as a base, instead of the information about the user who
+ // stored the watch.
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch
ExecuteWatch watcher_execute_watch.NewExecuteWatch
- // Retrieve settings for the watcher system index
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html
+ // Get Watcher index settings.
+ // Get settings for the Watcher internal index (`.watches`).
+ // Only a subset of settings are shown, for example `index.auto_expand_replicas`
+ // and `index.number_of_replicas`.
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings
GetSettings watcher_get_settings.NewGetSettings
- // Retrieves a watch by its ID.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html
+ // Get a watch.
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch
GetWatch watcher_get_watch.NewGetWatch
- // Creates a new watch, or updates an existing one.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html
+ // Create or update a watch.
+ // When a watch is registered, a new document that represents the watch is added
+ // to the `.watches` index and its trigger is immediately registered with the
+ // relevant trigger engine.
+ // Typically for the `schedule` trigger, the scheduler is the trigger engine.
+ // + // IMPORTANT: You must use Kibana or this API to create a watch. + // Do not add a watch directly to the `.watches` index by using the + // Elasticsearch index API. + // If Elasticsearch security features are enabled, do not give users write + // privileges on the `.watches` index. + // + // When you add a watch you can also define its initial active state by setting + // the *active* parameter. + // + // When Elasticsearch security features are enabled, your watch can index or + // search only on indices for which the user that stored the watch has + // privileges. + // If the user is able to read index `a`, but not index `b`, the same will apply + // when the watch runs. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch PutWatch watcher_put_watch.NewPutWatch - // Retrieves stored watches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html + // Query watches. + // Get all registered watches in a paginated manner and optionally filter + // watches by a query. + // + // Note that only the `_id` and `metadata.*` fields are queryable or sortable. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches QueryWatches watcher_query_watches.NewQueryWatches - // Starts Watcher if it is not already running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html + // Start the watch service. + // Start the Watcher service if it is not already running. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start Start watcher_start.NewStart - // Retrieves the current Watcher metrics. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html + // Get Watcher statistics. + // This API always returns basic metrics. + // You retrieve more metrics by using the metric parameter. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats Stats watcher_stats.NewStats - // Stops Watcher if it is running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html + // Stop the watch service. + // Stop the Watcher service if it is running. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop Stop watcher_stop.NewStop - // Update settings for the watcher system index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html + // Update Watcher index settings. + // Update settings for the Watcher internal index (`.watches`). + // Only a subset of settings can be modified. + // This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings UpdateSettings watcher_update_settings.NewUpdateSettings } type Xpack struct { - // Provides general information about the installed X-Pack features. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html + // Get information. + // The information provided by the API includes: + // + // * Build information including the build number and timestamp. + // * License information about the currently installed license. + // * Feature information for the features that are currently enabled and + // available under the current license. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info Info xpack_info.NewInfo - // This API provides information about which features are currently enabled and - // available under the current license and some usage statistics. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html + // Get usage information. + // Get information about the features that are currently enabled and available + // under the current license. 
+ // The API also provides some usage statistics. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack Usage xpack_usage.NewUsage } @@ -3613,6 +8278,7 @@ type API struct { SearchableSnapshots SearchableSnapshots Security Security Shutdown Shutdown + Simulate Simulate Slm Slm Snapshot Snapshot Sql Sql @@ -3625,17 +8291,169 @@ type API struct { Xpack Xpack // Bulk index or delete documents. - // Performs multiple indexing or delete operations in a single API call. + // Perform multiple `index`, `create`, `delete`, and `update` actions in a + // single request. // This reduces overhead and can greatly increase indexing speed. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To use the `create` action, you must have the `create_doc`, `create`, + // `index`, or `write` index privilege. Data streams support only the `create` + // action. + // * To use the `index` action, you must have the `create`, `index`, or `write` + // index privilege. + // * To use the `delete` action, you must have the `delete` or `write` index + // privilege. + // * To use the `update` action, you must have the `index` or `write` index + // privilege. + // * To automatically create a data stream or index with a bulk API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // * To make the result of a bulk operation visible to search using the + // `refresh` parameter, you must have the `maintenance` or `manage` index + // privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. 
+ // + // The actions are specified in the request body using a newline delimited JSON + // (NDJSON) structure: + // + // ``` + // action_and_meta_data\n + // optional_source\n + // action_and_meta_data\n + // optional_source\n + // .... + // action_and_meta_data\n + // optional_source\n + // ``` + // + // The `index` and `create` actions expect a source on the next line and have + // the same semantics as the `op_type` parameter in the standard index API. + // A `create` action fails if a document with the same ID already exists in the + // target + // An `index` action adds or replaces a document as necessary. + // + // NOTE: Data streams support only the `create` action. + // To update or delete a document in a data stream, you must target the backing + // index containing the document. + // + // An `update` action expects that the partial doc, upsert, and script and its + // options are specified on the next line. + // + // A `delete` action does not expect a source on the next line and has the same + // semantics as the standard delete API. + // + // NOTE: The final line of data must end with a newline character (`\n`). + // Each newline character may be preceded by a carriage return (`\r`). + // When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header + // of `application/json` or `application/x-ndjson`. + // Because this format uses literal newline characters (`\n`) as delimiters, + // make sure that the JSON actions and sources are not pretty printed. + // + // If you provide a target in the request path, it is used for any actions that + // don't explicitly specify an `_index` argument. + // + // A note on the format: the idea here is to make processing as fast as + // possible. + // As some of the actions are redirected to other shards on other nodes, only + // `action_meta_data` is parsed on the receiving node side. 
+ //
+ // Client libraries using this protocol should try and strive to do something
+ // similar on the client side, and reduce buffering as much as possible.
+ //
+ // There is no "correct" number of actions to perform in a single bulk request.
+ // Experiment with different settings to find the optimal size for your
+ // particular workload.
+ // Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by
+ // default so clients must ensure that no request exceeds this size.
+ // It is not possible to index a single document that exceeds the size limit, so
+ // you must pre-process any such documents into smaller pieces before sending
+ // them to Elasticsearch.
+ // For instance, split documents into pages or chapters before indexing them, or
+ // store raw binary data in a system outside Elasticsearch and replace the raw
+ // data with a link to the external system in the documents that you send to
+ // Elasticsearch.
+ //
+ // **Client support for bulk requests**
+ //
+ // Some of the officially supported clients provide helpers to assist with bulk
+ // requests and reindexing:
+ //
+ // * Go: Check out `esutil.BulkIndexer`
+ // * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+ // `Search::Elasticsearch::Client::5_0::Scroll`
+ // * Python: Check out `elasticsearch.helpers.*`
+ // * JavaScript: Check out `client.helpers.*`
+ // * .NET: Check out `BulkAllObservable`
+ // * PHP: Check out bulk indexing.
+ //
+ // **Submitting bulk requests with cURL**
+ //
+ // If you're providing text file input to `curl`, you must use the
+ // `--data-binary` flag instead of plain `-d`.
+ // The latter doesn't preserve newlines. For example:
+ //
+ // ```
+ // $ cat requests
+ // { "index" : { "_index" : "test", "_id" : "1" } }
+ // { "field1" : "value1" }
+ // $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+ // --data-binary "@requests"; echo
+ // {"took":7, "errors": false,
+ // "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+ // ```
+ //
+ // **Optimistic concurrency control**
+ //
+ // Each `index` and `delete` action within a bulk API call may include the
+ // `if_seq_no` and `if_primary_term` parameters in their respective action and
+ // meta data lines.
+ // The `if_seq_no` and `if_primary_term` parameters control how operations are
+ // run, based on the last modification to existing documents. See Optimistic
+ // concurrency control for more details.
+ //
+ // **Versioning**
+ //
+ // Each bulk item can include the version value using the `version` field.
+ // It automatically follows the behavior of the index or delete operation based
+ // on the `_version` mapping.
+ // It also supports the `version_type`.
+ //
+ // **Routing**
+ //
+ // Each bulk item can include the routing value using the `routing` field.
+ // It automatically follows the behavior of the index or delete operation based
+ // on the `_routing` mapping.
+ //
+ // NOTE: Data streams do not support custom routing unless they were created
+ // with the `allow_custom_routing` setting enabled in the template.
+ //
+ // **Wait for active shards**
+ //
+ // When making bulk calls, you can set the `wait_for_active_shards` parameter to
+ // require a minimum number of shard copies to be active before starting to
+ // process the bulk request.
+ //
+ // **Refresh**
+ //
+ // Control when the changes made by this request are visible to search.
+ //
+ // NOTE: Only the shards that receive the bulk request will be affected by
+ // refresh.
+ // Imagine a `_bulk?refresh=wait_for` request with three documents in it that + // happen to be routed to different shards in an index with five shards. + // The request will only wait for those three shards to refresh. + // The other two shards that make up the index do not participate in the `_bulk` + // request at all. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk Bulk core_bulk.NewBulk // Clear a scrolling search. - // // Clear the search context and results for a scrolling search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll ClearScroll core_clear_scroll.NewClearScroll // Close a point in time. - // // A point in time must be opened explicitly before being used in search // requests. // The `keep_alive` parameter tells Elasticsearch how long it should persist. @@ -3643,25 +8461,336 @@ type API struct { // elapsed. // However, keeping points in time has a cost; close them as soon as they are no // longer required for search requests. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time ClosePointInTime core_close_point_in_time.NewClosePointInTime - // Returns number of documents matching a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html + // Count search results. + // Get the number of documents matching a query. + // + // The query can be provided either by using a simple query string as a + // parameter, or by defining Query DSL within the request body. + // The query is optional. When no query is provided, the API uses `match_all` to + // count all the documents. + // + // The count API supports multi-target syntax. You can run a single count API + // search across multiple data streams and indices. 
+ // + // The operation is broadcast across all shards. + // For each shard ID group, a replica is chosen and the search is run against + // it. + // This means that replicas increase the scalability of the count. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count Count core_count.NewCount - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it - // searchable. - // If the target is an index and the document already exists, the request - // updates the document and increments its version. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html + // Create a new document in the index. + // + // You can index a new JSON document with the `//_doc/` or + // `//_create/<_id>` APIs + // Using `_create` guarantees that the document is indexed only if it does not + // already exist. + // It returns a 409 response when a document with a same ID already exists in + // the index. + // To update an existing document, you must use the `//_doc/` API. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To add a document using the `PUT //_create/<_id>` or `POST + // //_create/<_id>` request formats, you must have the `create_doc`, + // `create`, `index`, or `write` index privilege. + // * To automatically create a data stream or index with this API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. 
+ // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. + // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. 
+ // If the `_routing` mapping is defined and set to be required, the index
+ // operation will fail if no routing value is provided or extracted.
+ //
+ // NOTE: Data streams do not support custom routing unless they were created
+ // with the `allow_custom_routing` setting enabled in the template.
+ //
+ // **Distributed**
+ //
+ // The index operation is directed to the primary shard based on its route and
+ // performed on the actual node containing this shard.
+ // After the primary shard completes the operation, if needed, the update is
+ // distributed to applicable replicas.
+ //
+ // **Active shards**
+ //
+ // To improve the resiliency of writes to the system, indexing operations can be
+ // configured to wait for a certain number of active shard copies before
+ // proceeding with the operation.
+ // If the requisite number of active shard copies are not available, then the
+ // write operation must wait and retry, until either the requisite shard copies
+ // have started or a timeout occurs.
+ // By default, write operations only wait for the primary shards to be active
+ // before proceeding (that is to say `wait_for_active_shards` is `1`).
+ // This default can be overridden in the index settings dynamically by setting
+ // `index.write.wait_for_active_shards`.
+ // To alter this behavior per operation, use the `wait_for_active_shards`
+ // request parameter.
+ //
+ // Valid values are all or any positive integer up to the total number of
+ // configured copies per shard in the index (which is `number_of_replicas`+1).
+ // Specifying a negative value or a number greater than the number of shard
+ // copies will throw an error.
+ //
+ // For example, suppose you have a cluster of three nodes, A, B, and C and you
+ // create an index index with the number of replicas set to 3 (resulting in 4
+ // shard copies, one more copy than there are nodes).
+ // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create Create core_create.NewCreate // Delete a document. - // Removes a JSON document from the specified index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html + // + // Remove a JSON document from the specified index. + // + // NOTE: You cannot send deletion requests directly to a data stream. 
+ // To delete a document in a data stream, you must target the backing index + // containing the document. + // + // **Optimistic concurrency control** + // + // Delete operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Versioning** + // + // Each document indexed is versioned. + // When deleting a document, the version can be specified to make sure the + // relevant document you are trying to delete is actually being deleted and it + // has not changed in the meantime. + // Every write operation run on a document, deletes included, causes its version + // to be incremented. + // The version number of a deleted document remains available for a short time + // after deletion to allow for control of concurrent operations. + // The length of time for which a deleted document's version remains available + // is determined by the `index.gc_deletes` index setting. + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to delete a document. + // + // If the `_routing` mapping is set to `required` and no routing value is + // specified, the delete API throws a `RoutingMissingException` and rejects the + // request. + // + // For example: + // + // ``` + // DELETE /my-index-000001/_doc/1?routing=shard-1 + // ``` + // + // This request deletes the document with ID 1, but it is routed based on the + // user. + // The document is not deleted if the correct routing is not specified. + // + // **Distributed** + // + // The delete operation gets hashed into a specific shard ID. 
+ // It then gets redirected into the primary shard within that ID group and + // replicated (if needed) to shard replicas within that ID group. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete Delete core_delete.NewDelete // Delete documents. + // // Deletes documents that match the specified query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `delete` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // When you submit a delete by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and deletes + // matching documents using internal versioning. + // If a document changes between the time that the snapshot is taken and the + // delete operation is processed, it results in a version conflict and the + // delete operation fails. + // + // NOTE: Documents with a version equal to 0 cannot be deleted using delete by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing a delete by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents to delete. + // A bulk delete request is performed for each batch of matching documents. + // If a search or bulk request is rejected, the requests are retried up to 10 + // times, with exponential back off. + // If the maximum retry limit is reached, processing halts and all failed + // requests are returned in the response. + // Any delete requests that completed successfully still stick, they are not + // rolled back. 
+ // + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts the operation could attempt + // to delete more documents from the source than `max_docs` until it has + // successfully deleted `max_docs documents`, or it has gone through every + // document in the source query. + // + // **Throttling delete requests** + // + // To control the rate at which delete by query issues batches of delete + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to disable throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single `_bulk` request, large batch sizes + // cause Elasticsearch to create many requests and wait before starting the next + // set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Delete by query supports sliced scroll to parallelize the delete process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` lets Elasticsearch choose the number of slices to + // use. + // This setting will use one slice per shard, up to a certain limit. 
+ // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // Adding slices to the delete by query operation creates sub-requests which + // means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with slices only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices` each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the earlier point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being deleted. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many `slices` hurts + // performance. 
Setting `slices` higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Delete performance scales linearly across available resources with the + // number of slices. + // + // Whether query or delete performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Cancel a delete by query operation** + // + // Any delete by query can be canceled using the task cancel API. For example: + // + // ``` + // POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel + // ``` + // + // The task ID can be found by using the get tasks API. + // + // Cancellation should happen quickly but might take a few seconds. + // The get task status API will continue to list the delete by query task until + // this task checks that it has been cancelled and terminates itself. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query DeleteByQuery core_delete_by_query.NewDeleteByQuery // Throttle a delete by query operation. // @@ -3670,24 +8799,54 @@ type API struct { // Rethrottling that speeds up the query takes effect immediately but // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Delete a script or search template. // Deletes a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script DeleteScript core_delete_script.NewDeleteScript // Check a document. - // Checks if a specified document exists. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Verify that a document exists. + // For example, check to see if a document with the `_id` 0 exists: + // + // ``` + // HEAD my-index-000001/_doc/0 + // ``` + // + // If the document exists, the API returns a status code of `200 - OK`. + // If the document doesn’t exist, the API returns `404 - Not Found`. + // + // **Versioning support** + // + // You can use the `version` parameter to check the document only if its current + // version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get Exists core_exists.NewExists // Check for a document source. - // Checks if a document's `_source` is stored. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Check whether a document source exists in an index. + // For example: + // + // ``` + // HEAD my-index-000001/_source/1 + // ``` + // + // A document's source is not available if it is disabled in the mapping. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get ExistsSource core_exists_source.NewExistsSource // Explain a document match result. - // Returns information about why a specific document matches, or doesn’t match, - // a query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html + // Get information about why a specific document matches, or doesn't match, a + // query. + // It computes a score explanation for a query and a specific document. 
+ // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain Explain core_explain.NewExplain // Get the field capabilities. // @@ -3698,43 +8857,351 @@ type API struct { // It returns runtime fields like any other field. // For example, a runtime field with a type of keyword is returned the same as // any other field that belongs to the `keyword` family. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps FieldCaps core_field_caps.NewFieldCaps // Get a document by its ID. - // Retrieves the document with the specified ID from an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Get a document and its source or stored fields from an index. + // + // By default, this API is realtime and is not affected by the refresh rate of + // the index (when data will become visible for search). + // In the case where stored fields are requested with the `stored_fields` + // parameter and the document has been updated but is not yet refreshed, the API + // will have to parse and analyze the source to extract the stored fields. + // To turn off realtime behavior, set the `realtime` parameter to false. + // + // **Source filtering** + // + // By default, the API returns the contents of the `_source` field unless you + // have used the `stored_fields` parameter or the `_source` field is turned off. + // You can turn off `_source` retrieval by using the `_source` parameter: + // + // ``` + // GET my-index-000001/_doc/0?_source=false + // ``` + // + // If you only need one or two fields from the `_source`, use the + // `_source_includes` or `_source_excludes` parameters to include or filter out + // particular fields. 
+ // This can be helpful with large documents where partial retrieval can save on + // network overhead + // Both parameters take a comma separated list of fields or wildcard + // expressions. + // For example: + // + // ``` + // GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + // ``` + // + // If you only want to specify includes, you can use a shorter notation: + // + // ``` + // GET my-index-000001/_doc/0?_source=*.id + // ``` + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to retrieve a document. + // For example: + // + // ``` + // GET my-index-000001/_doc/2?routing=user1 + // ``` + // + // This request gets the document with ID 2, but it is routed based on the user. + // The document is not fetched if the correct routing is not specified. + // + // **Distributed** + // + // The GET operation is hashed into a specific shard ID. + // It is then redirected to one of the replicas within that shard ID and returns + // the result. + // The replicas are the primary shard and its replicas within that shard ID + // group. + // This means that the more replicas you have, the better your GET scaling will + // be. + // + // **Versioning support** + // + // You can use the `version` parameter to retrieve the document only if its + // current version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get Get core_get.NewGet // Get a script or search template. // Retrieves a stored script or search template. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script GetScript core_get_script.NewGetScript // Get script contexts. // // Get a list of supported script contexts and their methods. - // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context GetScriptContext core_get_script_context.NewGetScriptContext // Get script languages. // // Get a list of available script types, languages, and contexts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Get a document's source. - // Returns the source of a document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + // + // Get the source of a document. + // For example: + // + // ``` + // GET my-index-000001/_source/1 + // ``` + // + // You can use the source filtering parameters to control which parts of the + // `_source` are returned: + // + // ``` + // GET + // my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get GetSource core_get_source.NewGetSource - // Returns the health of the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html + // Get the cluster health. + // Get a report with the health status of an Elasticsearch cluster. + // The report contains a list of indicators that compose Elasticsearch + // functionality. + // + // Each indicator has a health status of: green, unknown, yellow or red. 
+ // The indicator will provide an explanation and metadata describing the reason + // for its current health status. + // + // The cluster’s status is controlled by the worst indicator status. + // + // In the event that an indicator’s status is non-green, a list of impacts may + // be present in the indicator result which detail the functionalities that are + // negatively affected by the health issue. + // Each impact carries with it a severity level, an area of the system that is + // affected, and a simple description of the impact on the system. + // + // Some health indicators can determine the root cause of a health problem and + // prescribe a set of steps that can be performed in order to improve the health + // of the system. + // The root cause and remediation steps are encapsulated in a diagnosis. + // A diagnosis contains a cause detailing a root cause analysis, an action + // containing a brief description of the steps to take to fix the problem, the + // list of affected resources (if applicable), and a detailed step-by-step + // troubleshooting guide to fix the diagnosed problem. + // + // NOTE: The health indicators perform root cause analysis of non-green health + // statuses. This can be computationally expensive when called frequently. + // When setting up automated polling of the API for health status, set verbose + // to false to disable the more expensive analysis logic. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report HealthReport core_health_report.NewHealthReport - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it + // Create or update a document in an index. + // + // Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html + // + // NOTE: You cannot use this API to send update requests for existing documents + // in a data stream. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request + // format, you must have the `create`, `index`, or `write` index privilege. + // * To add a document using the `POST /<target>/_doc/` request format, you must + // have the `create_doc`, `create`, `index`, or `write` index privilege. + // * To automatically create a data stream or index with this API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // NOTE: Replica shards might not all be started when an indexing operation + // returns successfully. + // By default, only the primary is required. Set `wait_for_active_shards` to + // change this default behavior. + // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. + // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. 
+ // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Optimistic concurrency control** + // + // Index operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. 
+ // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). + // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards + // request` parameter. + // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. 
+ // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // + // **No operation (noop) updates** + // + // When updating a document by using this API, a new version of the document is + // always created even if the document hasn't changed. + // If this isn't acceptable use the `_update` API with `detect_noop` set to + // `true`. + // The `detect_noop` option isn't available on this API because it doesn’t fetch + // the old source and isn't able to compare it against the new source. + // + // There isn't a definitive rule for when noop updates aren't acceptable. 
+ // It's a combination of lots of factors like how frequently your data source + // sends updates that are actually noops and how many queries per second + // Elasticsearch runs on the shard receiving the updates. + // + // **Versioning** + // + // Each indexed document is given a version number. + // By default, internal versioning is used that starts at 1 and increments with + // each update, deletes included. + // Optionally, the version number can be set to an external value (for example, + // if maintained in a database). + // To enable this functionality, `version_type` should be set to `external`. + // The value provided must be a numeric, long value greater than or equal to 0, + // and less than around `9.2e+18`. + // + // NOTE: Versioning is completely real time, and is not affected by the near + // real time aspects of search operations. + // If no version is provided, the operation runs without any version checks. + // + // When using the external version type, the system checks to see if the version + // number passed to the index request is greater than the version of the + // currently stored document. + // If true, the document will be indexed and the new version number used. + // If the value provided is less than or equal to the stored document's version + // number, a version conflict will occur and the index operation will fail. For + // example: + // + // ``` + // PUT my-index-000001/_doc/1?version=2&version_type=external + // { + // "user": { + // "id": "elkbee" + // } + // } + // ``` + // + // In this example, the operation will succeed since the supplied version of 2 + // is higher than the current document version of 1. + // If the document was already updated and its version was set to 2 or higher, + // the indexing command will fail and result in a conflict (409 HTTP status + // code). 
+ // + // A nice side effect is that there is no need to maintain strict ordering of + // async indexing operations run as a result of changes to a source database, as + // long as version numbers from the source database are used. + // Even the simple case of updating the Elasticsearch index using data from a + // database is simplified if external versioning is used, as only the latest + // version will be used if the index operations arrive out of order. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create Index core_index.NewIndex // Get cluster info. - // Returns basic information about the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get basic build, version, and cluster information. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info Info core_info.NewInfo // Run a knn search. // @@ -3753,7 +9220,16 @@ type API struct { // // The kNN search API supports restricting the search using a filter. // The search will return the top k documents that also match the filter query. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // + // A kNN search response has the exact same structure as a search API response. + // However, certain sections have a meaning specific to kNN search: + // + // * The document `_score` is determined by the similarity between the query and + // document vector. + // * The `hits.total` object contains the total number of nearest neighbor + // candidates considered, which is `num_candidates * num_shards`. The + // `hits.total.relation` will always be `eq`, indicating an exact value. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html KnnSearch core_knn_search.NewKnnSearch // Get multiple documents. // @@ -3762,7 +9238,24 @@ type API struct { // document IDs in the request body. 
// To ensure fast responses, this multi get (mget) API responds with partial // results if one or more shards fail. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html + // + // **Filter source fields** + // + // By default, the `_source` field is returned for every document (if stored). + // Use the `_source` and `_source_include` or `source_exclude` attributes to + // filter what fields are returned for a particular document. + // You can include the `_source`, `_source_includes`, and `_source_excludes` + // query parameters in the request URI to specify the defaults to use when there + // are no per-document instructions. + // + // **Get stored fields** + // + // Use the `stored_fields` attribute to specify the set of stored fields you + // want to retrieve. + // Any requested fields that are not stored are ignored. + // You can include the `stored_fields` query parameter in the request URI to + // specify the defaults to use when there are no per-document instructions. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget Mget core_mget.NewMget // Run multiple searches. // @@ -3784,19 +9277,43 @@ type API struct { // Each newline character may be preceded by a carriage return `\r`. // When sending requests to this endpoint the `Content-Type` header should be // set to `application/x-ndjson`. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch Msearch core_msearch.NewMsearch // Run multiple templated searches. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // + // Run multiple templated searches with a single request. + // If you are providing a text file or text input to `curl`, use the + // `--data-binary` flag instead of `-d` to preserve newlines. 
+ // For example: + // + // ``` + // $ cat requests + // { "index": "my-index" } + // { "id": "my-search-template", "params": { "query_string": "hello world", + // "from": 0, "size": 10 }} + // { "index": "my-other-index" } + // { "id": "my-other-search-template", "params": { "query_type": "match_all" }} + // + // $ curl -H "Content-Type: application/x-ndjson" -XGET + // localhost:9200/_msearch/template --data-binary "@requests"; echo + // ``` + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template MsearchTemplate core_msearch_template.NewMsearchTemplate // Get multiple term vectors. // + // Get multiple term vectors with a single request. // You can specify existing documents by index and ID or provide artificial // documents in the body of the request. // You can specify the index in the request body or request URI. // The response contains a `docs` array with all the fetched termvectors. // Each element has the structure provided by the termvectors API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html + // + // **Artificial documents** + // + // You can also use `mtermvectors` to generate term vectors for artificial + // documents provided in the body of the request. + // The mapping used is determined by the specified `_index`. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors Mtermvectors core_mtermvectors.NewMtermvectors // Open a point in time. // @@ -3814,41 +9331,382 @@ type API struct { // // A point in time must be opened explicitly before being used in search // requests. - // The `keep_alive` parameter tells Elasticsearch how long it should persist. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html + // + // A subsequent search request with the `pit` parameter must not specify + // `index`, `routing`, or `preference` values as these parameters are copied + // from the point in time. 
+ // + // Just like regular searches, you can use `from` and `size` to page through + // point in time search results, up to the first 10,000 hits. + // If you want to retrieve more hits, use PIT with `search_after`. + // + // IMPORTANT: The open point in time request and each subsequent search request + // can return different identifiers; always use the most recently received ID + // for the next search request. + // + // When a PIT that contains shard failures is used in a search request, the + // missing are always reported in the search response as a + // `NoShardAvailableActionException` exception. + // To get rid of these exceptions, a new PIT needs to be created so that shards + // missing from the previous PIT can be handled, assuming they become available + // in the meantime. + // + // **Keeping point in time alive** + // + // The `keep_alive` parameter, which is passed to a open point in time request + // and search request, extends the time to live of the corresponding point in + // time. + // The value does not need to be long enough to process all data — it just needs + // to be long enough for the next request. + // + // Normally, the background merge process optimizes the index by merging + // together smaller segments to create new, bigger segments. + // Once the smaller segments are no longer needed they are deleted. + // However, open point-in-times prevent the old segments from being deleted + // since they are still in use. + // + // TIP: Keeping older segments alive means that more disk space and file handles + // are needed. + // Ensure that you have configured your nodes to have ample free file handles. + // + // Additionally, if a segment contains deleted or updated documents then the + // point in time must keep track of whether each document in the segment was + // live at the time of the initial search request. 
+ // Ensure that your nodes have sufficient heap space if you have many open + // point-in-times on an index that is subject to ongoing deletes or updates. + // Note that a point-in-time doesn't prevent its associated indices from being + // deleted. + // You can check how many point-in-times (that is, search contexts) are open + // with the nodes stats API. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Ping the cluster. - // Returns whether the cluster is running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get information about whether the cluster is running. + // https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster Ping core_ping.NewPing // Create or update a script or search template. // Creates or updates a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script PutScript core_put_script.NewPutScript // Evaluate ranked search results. // // Evaluate the quality of ranked search results over a set of typical search // queries. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval RankEval core_rank_eval.NewRankEval // Reindex documents. - // Copies documents from a source to a destination. The source can be any - // existing index, alias, or data stream. The destination must differ from the - // source. For example, you cannot reindex a data stream into itself. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html + // + // Copy documents from a source to a destination. + // You can copy all documents to the destination index or reindex a subset of + // the documents. 
+ // The source can be any existing index, alias, or data stream. + // The destination must differ from the source. + // For example, you cannot reindex a data stream into itself. + // + // IMPORTANT: Reindex requires `_source` to be enabled for all documents in the + // source. + // The destination should be configured as wanted before calling the reindex + // API. + // Reindex does not copy the settings from the source or its associated + // template. + // Mappings, shard counts, and replicas, for example, must be configured ahead + // of time. + // + // If the Elasticsearch security features are enabled, you must have the + // following security privileges: + // + // * The `read` index privilege for the source data stream, index, or alias. + // * The `write` index privilege for the destination data stream, index, or + // index alias. + // * To automatically create a data stream or index with a reindex API request, + // you must have the `auto_configure`, `create_index`, or `manage` index + // privilege for the destination data stream, index, or alias. + // * If reindexing from a remote cluster, the `source.remote.user` must have the + // `monitor` cluster privilege and the `read` index privilege for the source + // data stream, index, or alias. + // + // If reindexing from a remote cluster, you must explicitly allow the remote + // host in the `reindex.remote.whitelist` setting. + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The `dest` element can be configured like the index API to control optimistic + // concurrency control. + // Omitting `version_type` or setting it to `internal` causes Elasticsearch to + // blindly dump documents into the destination, overwriting any that happen to + // have the same ID. 
+ // + // Setting `version_type` to `external` causes Elasticsearch to preserve the + // `version` from the source, create any documents that are missing, and update + // any documents that have an older version in the destination than they do in + // the source. + // + // Setting `op_type` to `create` causes the reindex API to create only missing + // documents in the destination. + // All existing documents will cause a version conflict. + // + // IMPORTANT: Because data streams are append-only, any reindex request to a + // destination data stream must have an `op_type` of `create`. + // A reindex can only add new documents to a destination data stream. + // It cannot update existing documents in a destination data stream. + // + // By default, version conflicts abort the reindex process. + // To continue reindexing if there are conflicts, set the `conflicts` request + // body property to `proceed`. + // In this case, the response includes a count of the version conflicts that + // were encountered. + // Note that the handling of other error types is unaffected by the `conflicts` + // property. + // Additionally, if you opt to count version conflicts, the operation could + // attempt to reindex more documents from the source than `max_docs` until it + // has successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. + // + // NOTE: The reindex API makes no effort to handle ID collisions. + // The last document written will "win" but the order isn't usually predictable + // so it is not a good idea to rely on this behavior. + // Instead, make sure that IDs are unique by using a script. + // + // **Running reindex asynchronously** + // + // If the request contains `wait_for_completion=false`, Elasticsearch performs + // some preflight checks, launches the request, and returns a task you can use + // to cancel or get the status of the task. 
+ // Elasticsearch creates a record of this task as a document at + // `_tasks/<task_id>`. + // + // **Reindex from multiple sources** + // + // If you have many sources to reindex it is generally better to reindex them + // one at a time rather than using a glob pattern to pick up multiple sources. + // That way you can resume the process if there are any errors by removing the + // partially completed source and starting over. + // It also makes parallelizing the process fairly simple: split the list of + // sources to reindex and run each list in parallel. + // + // For example, you can use a bash script like this: + // + // ``` + // for index in i1 i2 i3 i4 i5; do + // curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty + // -d'{ + // "source": { + // "index": "'$index'" + // }, + // "dest": { + // "index": "'$index'-reindexed" + // } + // }' + // done + // ``` + // + // **Throttling** + // + // Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, + // for example) to throttle the rate at which reindex issues batches of index + // operations. + // Requests are throttled by padding each batch with a wait time. + // To turn off throttling, set `requests_per_second` to `-1`. + // + // The throttling is done by waiting between batches so that the scroll that + // reindex uses internally can be given a timeout that takes into account the + // padding. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single bulk request, large batch sizes cause + // Elasticsearch to create many requests and then wait for a while before + // starting the next set. 
+ // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Reindex supports sliced scroll to parallelize the reindexing process. + // This parallelization can improve efficiency and provide a convenient way to + // break the request down into smaller parts. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // You can slice a reindex request manually by providing a slice ID and total + // number of slices to each request. + // You can also let reindex automatically parallelize by using sliced scroll to + // slice on `_id`. + // The `slices` parameter specifies the number of slices to use. + // + // Adding `slices` to the reindex request just automates the manual process, + // creating sub-requests which means it has some quirks: + // + // * You can see these requests in the tasks API. These sub-requests are "child" + // tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices`, each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the previous point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being reindexed. 
+ // * Each sub-request gets a slightly different snapshot of the source, though + // these are all taken at approximately the same time. + // + // If slicing automatically, setting `slices` to `auto` will choose a reasonable + // number for most indices. + // If slicing manually or otherwise tuning automatic slicing, use the following + // guidelines. + // + // Query performance is most efficient when the number of slices is equal to the + // number of shards in the index. + // If that number is large (for example, `500`), choose a lower number as too + // many slices will hurt performance. + // Setting slices higher than the number of shards generally does not improve + // efficiency and adds overhead. + // + // Indexing performance scales linearly across available resources with the + // number of slices. + // + // Whether query or indexing performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Modify documents during reindexing** + // + // Like `_update_by_query`, reindex operations support a script that modifies + // the document. + // Unlike `_update_by_query`, the script is allowed to modify the document's + // metadata. + // + // Just as in `_update_by_query`, you can set `ctx.op` to change the operation + // that is run on the destination. + // For example, set `ctx.op` to `noop` if your script decides that the document + // doesn’t have to be indexed in the destination. This "no operation" will be + // reported in the `noop` counter in the response body. + // Set `ctx.op` to `delete` if your script decides that the document must be + // deleted from the destination. + // The deletion will be reported in the `deleted` counter in the response body. + // Setting `ctx.op` to anything else will return an error, as will setting any + // other field in `ctx`. + // + // Think of the possibilities! 
Just be careful; you are able to change: + // + // * `_id` + // * `_index` + // * `_version` + // * `_routing` + // + // Setting `_version` to `null` or clearing it from the `ctx` map is just like + // not sending the version in an indexing request. + // It will cause the document to be overwritten in the destination regardless of + // the version on the target or the version type you use in the reindex API. + // + // **Reindex from remote** + // + // Reindex supports reindexing from a remote Elasticsearch cluster. + // The `host` parameter must contain a scheme, host, port, and optional path. + // The `username` and `password` parameters are optional and when they are + // present the reindex operation will connect to the remote Elasticsearch node + // using basic authentication. + // Be sure to use HTTPS when using basic authentication or the password will be + // sent in plain text. + // There are a range of settings available to configure the behavior of the + // HTTPS connection. + // + // When using Elastic Cloud, it is also possible to authenticate against the + // remote cluster through the use of a valid API key. + // Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` + // setting. + // It can be set to a comma delimited list of allowed remote host and port + // combinations. + // Scheme is ignored; only the host and port are used. + // For example: + // + // ``` + // reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, + // localhost:*"] + // ``` + // + // The list of allowed hosts must be configured on any nodes that will + // coordinate the reindex. + // This feature should work with remote clusters of any version of + // Elasticsearch. + // This should enable you to upgrade from any version of Elasticsearch to the + // current version by reindexing from a cluster of the old version. + // + // WARNING: Elasticsearch does not support forward compatibility across major + // versions. 
+ // For example, you cannot reindex from a 7.x cluster into a 6.x cluster. + // + // To enable queries sent to older versions of Elasticsearch, the `query` + // parameter is sent directly to the remote host without validation or + // modification. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // Reindexing from a remote server uses an on-heap buffer that defaults to a + // maximum size of 100mb. + // If the remote index includes very large documents you'll need to use a + // smaller batch size. + // It is also possible to set the socket read timeout on the remote connection + // with the `socket_timeout` field and the connection timeout with the + // `connect_timeout` field. + // Both default to 30 seconds. + // + // **Configuring SSL parameters** + // + // Reindex from remote supports configurable SSL settings. + // These must be specified in the `elasticsearch.yml` file, with the exception + // of the secure settings, which you add in the Elasticsearch keystore. + // It is not possible to configure SSL in the body of the reindex request. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex Reindex core_reindex.NewReindex // Throttle a reindex operation. // // Change the number of requests per second for a particular reindex operation. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html + // For example: + // + // ``` + // POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + // ``` + // + // Rethrottling that speeds up the query takes effect immediately. + // Rethrottling that slows down the query will take effect after completing the + // current batch. + // This behavior prevents scroll timeouts. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle // Render a search template. 
// // Render a search template as a search request body. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Run a script. + // // Runs a script and returns a result. + // Use this API to build and test scripts, such as when defining a script for a + // runtime field. + // This API requires very few dependencies and is especially useful if you don't + // have permissions to write documents on a cluster. + // + // The API uses several _contexts_, which control how scripts are run, what + // variables are available at runtime, and what the return type is. + // + // Each context requires a script, but additional parameters depend on the + // context you're using for that script. // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute // Run a scrolling search. @@ -3877,7 +9735,7 @@ type API struct { // IMPORTANT: Results from a scrolling search reflect the state of the index at // the time of the initial search request. Subsequent indexing or document // changes only affect later search and scroll requests. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll Scroll core_scroll.NewScroll // Run a search. // @@ -3885,58 +9743,451 @@ type API struct { // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // + // If the Elasticsearch security features are enabled, you must have the read + // index privilege for the target data stream, index, or alias. For + // cross-cluster search, refer to the documentation about configuring CCS + // privileges. + // To search a point in time (PIT) for an alias, you must have the `read` index + // privilege for the alias's data streams or indices. + // + // **Search slicing** + // + // When paging through a large number of documents, it can be helpful to split + // the search into multiple slices to consume them independently with the + // `slice` and `pit` properties. + // By default the splitting is done first on the shards, then locally on each + // shard. + // The local splitting partitions the shard into contiguous ranges based on + // Lucene document IDs. + // + // For instance if the number of shards is equal to 2 and you request 4 slices, + // the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + // assigned to the second shard. + // + // IMPORTANT: The same point-in-time ID should be used for all slices. + // If different PIT IDs are used, slices can overlap and miss documents. + // This situation can occur because the splitting criterion is based on Lucene + // document IDs, which are not stable across changes to the index. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search Search core_search.NewSearch // Search a vector tile. // // Search a vector tile for geospatial values. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html + // Before using this API, you should be familiar with the Mapbox vector tile + // specification. + // The API returns results as a binary mapbox vector tile. 
+ // + // Internally, Elasticsearch translates a vector tile search API request into a + // search containing: + // + // * A `geo_bounding_box` query on the ``. The query uses the + // `//` tile as a bounding box. + // * A `geotile_grid` or `geohex_grid` aggregation on the ``. The + // `grid_agg` parameter determines the aggregation type. The aggregation uses + // the `//` tile as a bounding box. + // * Optionally, a `geo_bounds` aggregation on the ``. The search only + // includes this aggregation if the `exact_bounds` parameter is `true`. + // * If the optional parameter `with_labels` is `true`, the internal search will + // include a dynamic runtime field that calls the `getLabelPosition` function of + // the geometry doc value. This enables the generation of new point features + // containing suggested geometry labels, so that, for example, multi-polygons + // will have only one label. + // + // For example, Elasticsearch may translate a vector tile search API request + // with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of + // `true` into the following search + // + // ``` + // GET my-index/_search + // { + // "size": 10000, + // "query": { + // "geo_bounding_box": { + // "my-geo-field": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "aggregations": { + // "grid": { + // "geotile_grid": { + // "field": "my-geo-field", + // "precision": 11, + // "size": 65536, + // "bounds": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "bounds": { + // "geo_bounds": { + // "field": "my-geo-field", + // "wrap_longitude": false + // } + // } + // } + // } + // ``` + // + // The API returns results as a binary Mapbox vector tile. + // Mapbox vector tiles are encoded as Google Protobufs (PBF). 
By default, the + // tile contains three layers: + // + // * A `hits` layer containing a feature for each `` value matching the + // `geo_bounding_box` query. + // * An `aggs` layer containing a feature for each cell of the `geotile_grid` or + // `geohex_grid`. The layer only contains features for cells with matching data. + // * A meta layer containing: + // * A feature containing a bounding box. By default, this is the bounding box + // of the tile. + // * Value ranges for any sub-aggregations on the `geotile_grid` or + // `geohex_grid`. + // * Metadata for the search. + // + // The API only returns features that can display at its zoom level. + // For example, if a polygon feature has no area at its zoom level, the API + // omits it. + // The API returns errors as UTF-8 encoded JSON. + // + // IMPORTANT: You can specify several options for this API as either a query + // parameter or request body parameter. + // If you specify both parameters, the query parameter takes precedence. + // + // **Grid precision for geotile** + // + // For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles + // for lower zoom levels. + // `grid_precision` represents the additional zoom levels available through + // these cells. The final precision is computed by as follows: ` + + // grid_precision`. + // For example, if `` is 7 and `grid_precision` is 8, then the + // `geotile_grid` aggregation will use a precision of 15. + // The maximum final precision is 29. + // The `grid_precision` also determines the number of cells for the grid as + // follows: `(2^grid_precision) x (2^grid_precision)`. + // For example, a value of 8 divides the tile into a grid of 256 x 256 cells. + // The `aggs` layer only contains features for cells with matching data. + // + // **Grid precision for geohex** + // + // For a `grid_agg` of `geohex`, Elasticsearch uses `` and + // `grid_precision` to calculate a final precision as follows: ` + + // grid_precision`. 
+ // + // This precision determines the H3 resolution of the hexagonal cells produced + // by the `geohex` aggregation. + // The following table maps the H3 resolution for each precision. + // For example, if `` is 3 and `grid_precision` is 3, the precision is 6. + // At a precision of 6, hexagonal cells have an H3 resolution of 2. + // If `` is 3 and `grid_precision` is 4, the precision is 7. + // At a precision of 7, hexagonal cells have an H3 resolution of 3. + // + // | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | + // | --------- | ---------------- | ------------- | ----------------| ----- | + // | 1 | 4 | 0 | 122 | 30.5 | + // | 2 | 16 | 0 | 122 | 7.625 | + // | 3 | 64 | 1 | 842 | 13.15625 | + // | 4 | 256 | 1 | 842 | 3.2890625 | + // | 5 | 1024 | 2 | 5882 | 5.744140625 | + // | 6 | 4096 | 2 | 5882 | 1.436035156 | + // | 7 | 16384 | 3 | 41162 | 2.512329102 | + // | 8 | 65536 | 3 | 41162 | 0.6280822754 | + // | 9 | 262144 | 4 | 288122 | 1.099098206 | + // | 10 | 1048576 | 4 | 288122 | 0.2747745514 | + // | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | + // | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | + // | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | + // | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | + // | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | + // | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | + // | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | + // | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | + // | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | + // | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | + // | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | + // | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | + // | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | + // | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | + // | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | + // | 26 | 4503599627370500 | 15 | 569707381193162 | 
0.1265004504 | + // | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | + // | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | + // | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + // + // Hexagonal cells don't align perfectly on a vector tile. + // Some cells may intersect more than one vector tile. + // To compute the H3 resolution for each precision, Elasticsearch compares the + // average density of hexagonal bins at each resolution with the average density + // of tile bins at each zoom level. + // Elasticsearch uses the H3 resolution that is closest to the corresponding + // geotile density. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt SearchMvt core_search_mvt.NewSearchMvt // Get the search shards. // // Get the indices and shards that a search request would be run against. // This information can be useful for working out issues or planning // optimizations with routing and shard preferences. - // When filtered aliases are used, the filter is returned as part of the indices - // section. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html + // When filtered aliases are used, the filter is returned as part of the + // `indices` section. + // + // If the Elasticsearch security features are enabled, you must have the + // `view_index_metadata` or `manage` index privilege for the target data stream, + // index, or alias. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards SearchShards core_search_shards.NewSearchShards // Run a search with a search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template SearchTemplate core_search_template.NewSearchTemplate // Get terms in an index. // // Discover terms that match a partial string in an index. 
- // This "terms enum" API is designed for low-latency look-ups used in - // auto-complete scenarios. + // This API is designed for low-latency look-ups used in auto-complete + // scenarios. // - // If the `complete` property in the response is false, the returned terms set - // may be incomplete and should be treated as approximate. - // This can occur due to a few reasons, such as a request timeout or a node - // error. - // - // NOTE: The terms enum API may return terms from deleted documents. Deleted + // > info + // > The terms enum API may return terms from deleted documents. Deleted // documents are initially only marked as deleted. It is not until their // segments are merged that documents are actually deleted. Until that happens, // the terms enum API will return terms from these documents. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum TermsEnum core_terms_enum.NewTermsEnum // Get term vector information. // // Get information and statistics about terms in the fields of a particular // document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html + // + // You can retrieve term vectors for documents stored in the index or for + // artificial documents passed in the body of the request. + // You can specify the fields you are interested in through the `fields` + // parameter or by adding the fields to the request body. + // For example: + // + // ``` + // GET /my-index-000001/_termvectors/1?fields=message + // ``` + // + // Fields can be specified using wildcards, similar to the multi match query. + // + // Term vectors are real-time by default, not near real-time. + // This can be changed by setting `realtime` parameter to `false`. + // + // You can request three types of values: _term information_, _term statistics_, + // and _field statistics_. 
+ // By default, all term information and field statistics are returned for all + // fields but term statistics are excluded. + // + // **Term information** + // + // * term frequency in the field (always returned) + // * term positions (`positions: true`) + // * start and end offsets (`offsets: true`) + // * term payloads (`payloads: true`), as base64 encoded bytes + // + // If the requested information wasn't stored in the index, it will be computed + // on the fly if possible. + // Additionally, term vectors could be computed for documents not even existing + // in the index, but instead provided by the user. + // + // > warn + // > Start and end offsets assume UTF-16 encoding is being used. If you want to + // use these offsets in order to get the original text that produced this token, + // you should make sure that the string you are taking a sub-string of is also + // encoded using UTF-16. + // + // **Behaviour** + // + // The term and field statistics are not accurate. + // Deleted documents are not taken into account. + // The information is only retrieved for the shard the requested document + // resides in. + // The term and field statistics are therefore only useful as relative measures + // whereas the absolute numbers have no meaning in this context. + // By default, when requesting term vectors of artificial documents, a shard to + // get the statistics from is randomly selected. + // Use `routing` only to hit a particular shard. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors Termvectors core_termvectors.NewTermvectors // Update a document. - // Updates a document by running a script or passing a partial document. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html + // + // Update a document by running a script or passing a partial document. 
+ // + // If the Elasticsearch security features are enabled, you must have the `index` + // or `write` index privilege for the target index or index alias. + // + // The script can update, delete, or skip modifying the document. + // The API also supports passing a partial document, which is merged into the + // existing document. + // To fully replace an existing document, use the index API. + // This operation: + // + // * Gets the document (collocated with the shard) from the index. + // * Runs the specified script. + // * Indexes the result. + // + // The document must still be reindexed, but using this API removes some network + // roundtrips and reduces chances of version conflicts between the GET and the + // index operation. + // + // The `_source` field must be enabled to use this API. + // In addition to `_source`, you can access the following variables through the + // `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + // current timestamp). + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update Update core_update.NewUpdate // Update documents. // Updates documents that match the specified query. // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `index` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. 
+ // + // When you submit an update by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and updates + // matching documents using internal versioning. + // When the versions match, the document is updated and the version number is + // incremented. + // If a document changes between the time that the snapshot is taken and the + // update operation is processed, it results in a version conflict and the + // operation fails. + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts, the operation could attempt + // to update more documents from the source than `max_docs` until it has + // successfully updated `max_docs` documents or it has gone through every + // document in the source query. + // + // NOTE: Documents with a version equal to 0 cannot be updated using update by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing an update by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents. + // A bulk update request is performed for each batch of matching documents. + // Any query or update failures cause the update by query request to fail and + // the failures are shown in the response. + // Any update requests that completed successfully still stick, they are not + // rolled back. + // + // **Throttling update requests** + // + // To control the rate at which update by query issues batches of update + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to turn off throttling. 
+ // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is 1000, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single _bulk request, large batch sizes cause + // Elasticsearch to create many requests and wait before starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Update by query supports sliced scroll to parallelize the update process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` chooses a reasonable number for most data streams + // and indices. + // This setting will use one slice per shard, up to a certain limit. + // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // + // Adding `slices` to `_update_by_query` just automates the manual process of + // creating sub-requests, which means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. 
+ // * Canceling the request with slices will cancel each sub-request. + // * Due to the nature of slices each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // slices are distributed proportionally to each sub-request. Combine that with + // the point above about distribution being uneven and you should conclude that + // using `max_docs` with `slices` might not result in exactly `max_docs` + // documents being updated. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many slices hurts + // performance. Setting slices higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Update performance scales linearly across available resources with the + // number of slices. + // + // Whether query or update performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Update the document source** + // + // Update by query supports scripts to update the document source. + // As with the update API, you can set `ctx.op` to change the operation that is + // performed. + // + // Set `ctx.op = "noop"` if your script decides that it doesn't have to make any + // changes. + // The update by query operation skips updating the document and increments the + // `noop` counter. 
+ // + // Set `ctx.op = "delete"` if your script decides that the document should be + // deleted. + // The update by query operation deletes the document and increments the + // `deleted` counter. + // + // Update by query supports only `index`, `noop`, and `delete`. + // Setting `ctx.op` to anything else is an error. + // Setting any other field in `ctx` is an error. + // This API enables you to only modify the source of matching documents; you + // cannot move them. + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query UpdateByQuery core_update_by_query.NewUpdateByQuery // Throttle an update by query operation. // @@ -3945,7 +10196,7 @@ type API struct { // Rethrottling that speeds up the query takes effect immediately but // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } @@ -4050,14 +10301,19 @@ func New(tp elastictransport.Interface) *API { Put: connector_put.NewPutFunc(tp), SecretPost: connector_secret_post.NewSecretPostFunc(tp), SyncJobCancel: connector_sync_job_cancel.NewSyncJobCancelFunc(tp), + SyncJobCheckIn: connector_sync_job_check_in.NewSyncJobCheckInFunc(tp), + SyncJobClaim: connector_sync_job_claim.NewSyncJobClaimFunc(tp), SyncJobDelete: connector_sync_job_delete.NewSyncJobDeleteFunc(tp), + SyncJobError: connector_sync_job_error.NewSyncJobErrorFunc(tp), SyncJobGet: connector_sync_job_get.NewSyncJobGetFunc(tp), SyncJobList: connector_sync_job_list.NewSyncJobListFunc(tp), SyncJobPost: connector_sync_job_post.NewSyncJobPostFunc(tp), + SyncJobUpdateStats: connector_sync_job_update_stats.NewSyncJobUpdateStatsFunc(tp), UpdateActiveFiltering: 
connector_update_active_filtering.NewUpdateActiveFilteringFunc(tp), UpdateApiKeyId: connector_update_api_key_id.NewUpdateApiKeyIdFunc(tp), UpdateConfiguration: connector_update_configuration.NewUpdateConfigurationFunc(tp), UpdateError: connector_update_error.NewUpdateErrorFunc(tp), + UpdateFeatures: connector_update_features.NewUpdateFeaturesFunc(tp), UpdateFiltering: connector_update_filtering.NewUpdateFilteringFunc(tp), UpdateFilteringValidation: connector_update_filtering_validation.NewUpdateFilteringValidationFunc(tp), UpdateIndexName: connector_update_index_name.NewUpdateIndexNameFunc(tp), @@ -4143,8 +10399,11 @@ func New(tp elastictransport.Interface) *API { // Esql Esql: Esql{ - AsyncQuery: esql_async_query.NewAsyncQueryFunc(tp), - Query: esql_query.NewQueryFunc(tp), + AsyncQuery: esql_async_query.NewAsyncQueryFunc(tp), + AsyncQueryDelete: esql_async_query_delete.NewAsyncQueryDeleteFunc(tp), + AsyncQueryGet: esql_async_query_get.NewAsyncQueryGetFunc(tp), + AsyncQueryStop: esql_async_query_stop.NewAsyncQueryStopFunc(tp), + Query: esql_query.NewQueryFunc(tp), }, // Features @@ -4183,86 +10442,101 @@ func New(tp elastictransport.Interface) *API { // Indices Indices: Indices{ - AddBlock: indices_add_block.NewAddBlockFunc(tp), - Analyze: indices_analyze.NewAnalyzeFunc(tp), - ClearCache: indices_clear_cache.NewClearCacheFunc(tp), - Clone: indices_clone.NewCloneFunc(tp), - Close: indices_close.NewCloseFunc(tp), - Create: indices_create.NewCreateFunc(tp), - CreateDataStream: indices_create_data_stream.NewCreateDataStreamFunc(tp), - DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp), - Delete: indices_delete.NewDeleteFunc(tp), - DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp), - DeleteDataLifecycle: indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp), - DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp), - DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp), - DeleteTemplate: 
indices_delete_template.NewDeleteTemplateFunc(tp), - DiskUsage: indices_disk_usage.NewDiskUsageFunc(tp), - Downsample: indices_downsample.NewDownsampleFunc(tp), - Exists: indices_exists.NewExistsFunc(tp), - ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp), - ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp), - ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp), - ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp), - FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp), - Flush: indices_flush.NewFlushFunc(tp), - Forcemerge: indices_forcemerge.NewForcemergeFunc(tp), - Get: indices_get.NewGetFunc(tp), - GetAlias: indices_get_alias.NewGetAliasFunc(tp), - GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp), - GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp), - GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp), - GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp), - GetMapping: indices_get_mapping.NewGetMappingFunc(tp), - GetSettings: indices_get_settings.NewGetSettingsFunc(tp), - GetTemplate: indices_get_template.NewGetTemplateFunc(tp), - MigrateToDataStream: indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(tp), - ModifyDataStream: indices_modify_data_stream.NewModifyDataStreamFunc(tp), - Open: indices_open.NewOpenFunc(tp), - PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp), - PutAlias: indices_put_alias.NewPutAliasFunc(tp), - PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp), - PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp), - PutMapping: indices_put_mapping.NewPutMappingFunc(tp), - PutSettings: indices_put_settings.NewPutSettingsFunc(tp), - PutTemplate: indices_put_template.NewPutTemplateFunc(tp), - Recovery: indices_recovery.NewRecoveryFunc(tp), - Refresh: indices_refresh.NewRefreshFunc(tp), - 
ReloadSearchAnalyzers: indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(tp), - ResolveCluster: indices_resolve_cluster.NewResolveClusterFunc(tp), - ResolveIndex: indices_resolve_index.NewResolveIndexFunc(tp), - Rollover: indices_rollover.NewRolloverFunc(tp), - Segments: indices_segments.NewSegmentsFunc(tp), - ShardStores: indices_shard_stores.NewShardStoresFunc(tp), - Shrink: indices_shrink.NewShrinkFunc(tp), - SimulateIndexTemplate: indices_simulate_index_template.NewSimulateIndexTemplateFunc(tp), - SimulateTemplate: indices_simulate_template.NewSimulateTemplateFunc(tp), - Split: indices_split.NewSplitFunc(tp), - Stats: indices_stats.NewStatsFunc(tp), - Unfreeze: indices_unfreeze.NewUnfreezeFunc(tp), - UpdateAliases: indices_update_aliases.NewUpdateAliasesFunc(tp), - ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp), + AddBlock: indices_add_block.NewAddBlockFunc(tp), + Analyze: indices_analyze.NewAnalyzeFunc(tp), + CancelMigrateReindex: indices_cancel_migrate_reindex.NewCancelMigrateReindexFunc(tp), + ClearCache: indices_clear_cache.NewClearCacheFunc(tp), + Clone: indices_clone.NewCloneFunc(tp), + Close: indices_close.NewCloseFunc(tp), + Create: indices_create.NewCreateFunc(tp), + CreateDataStream: indices_create_data_stream.NewCreateDataStreamFunc(tp), + CreateFrom: indices_create_from.NewCreateFromFunc(tp), + DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp), + Delete: indices_delete.NewDeleteFunc(tp), + DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp), + DeleteDataLifecycle: indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp), + DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp), + DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp), + DeleteTemplate: indices_delete_template.NewDeleteTemplateFunc(tp), + DiskUsage: indices_disk_usage.NewDiskUsageFunc(tp), + Downsample: indices_downsample.NewDownsampleFunc(tp), + Exists: 
indices_exists.NewExistsFunc(tp), + ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp), + ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp), + ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp), + ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp), + FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp), + Flush: indices_flush.NewFlushFunc(tp), + Forcemerge: indices_forcemerge.NewForcemergeFunc(tp), + Get: indices_get.NewGetFunc(tp), + GetAlias: indices_get_alias.NewGetAliasFunc(tp), + GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp), + GetDataLifecycleStats: indices_get_data_lifecycle_stats.NewGetDataLifecycleStatsFunc(tp), + GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp), + GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp), + GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp), + GetMapping: indices_get_mapping.NewGetMappingFunc(tp), + GetMigrateReindexStatus: indices_get_migrate_reindex_status.NewGetMigrateReindexStatusFunc(tp), + GetSettings: indices_get_settings.NewGetSettingsFunc(tp), + GetTemplate: indices_get_template.NewGetTemplateFunc(tp), + MigrateReindex: indices_migrate_reindex.NewMigrateReindexFunc(tp), + MigrateToDataStream: indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(tp), + ModifyDataStream: indices_modify_data_stream.NewModifyDataStreamFunc(tp), + Open: indices_open.NewOpenFunc(tp), + PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp), + PutAlias: indices_put_alias.NewPutAliasFunc(tp), + PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp), + PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp), + PutMapping: indices_put_mapping.NewPutMappingFunc(tp), + PutSettings: indices_put_settings.NewPutSettingsFunc(tp), + PutTemplate: indices_put_template.NewPutTemplateFunc(tp), + Recovery: 
indices_recovery.NewRecoveryFunc(tp), + Refresh: indices_refresh.NewRefreshFunc(tp), + ReloadSearchAnalyzers: indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(tp), + ResolveCluster: indices_resolve_cluster.NewResolveClusterFunc(tp), + ResolveIndex: indices_resolve_index.NewResolveIndexFunc(tp), + Rollover: indices_rollover.NewRolloverFunc(tp), + Segments: indices_segments.NewSegmentsFunc(tp), + ShardStores: indices_shard_stores.NewShardStoresFunc(tp), + Shrink: indices_shrink.NewShrinkFunc(tp), + SimulateIndexTemplate: indices_simulate_index_template.NewSimulateIndexTemplateFunc(tp), + SimulateTemplate: indices_simulate_template.NewSimulateTemplateFunc(tp), + Split: indices_split.NewSplitFunc(tp), + Stats: indices_stats.NewStatsFunc(tp), + UpdateAliases: indices_update_aliases.NewUpdateAliasesFunc(tp), + ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp), }, // Inference Inference: Inference{ - Delete: inference_delete.NewDeleteFunc(tp), - Get: inference_get.NewGetFunc(tp), - Inference: inference_inference.NewInferenceFunc(tp), - Put: inference_put.NewPutFunc(tp), + ChatCompletionUnified: inference_chat_completion_unified.NewChatCompletionUnifiedFunc(tp), + Completion: inference_completion.NewCompletionFunc(tp), + Delete: inference_delete.NewDeleteFunc(tp), + Get: inference_get.NewGetFunc(tp), + Put: inference_put.NewPutFunc(tp), + PutOpenai: inference_put_openai.NewPutOpenaiFunc(tp), + PutWatsonx: inference_put_watsonx.NewPutWatsonxFunc(tp), + Rerank: inference_rerank.NewRerankFunc(tp), + SparseEmbedding: inference_sparse_embedding.NewSparseEmbeddingFunc(tp), + StreamCompletion: inference_stream_completion.NewStreamCompletionFunc(tp), + TextEmbedding: inference_text_embedding.NewTextEmbeddingFunc(tp), + Update: inference_update.NewUpdateFunc(tp), }, // Ingest Ingest: Ingest{ - DeleteGeoipDatabase: ingest_delete_geoip_database.NewDeleteGeoipDatabaseFunc(tp), - DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp), - GeoIpStats: 
ingest_geo_ip_stats.NewGeoIpStatsFunc(tp), - GetGeoipDatabase: ingest_get_geoip_database.NewGetGeoipDatabaseFunc(tp), - GetPipeline: ingest_get_pipeline.NewGetPipelineFunc(tp), - ProcessorGrok: ingest_processor_grok.NewProcessorGrokFunc(tp), - PutGeoipDatabase: ingest_put_geoip_database.NewPutGeoipDatabaseFunc(tp), - PutPipeline: ingest_put_pipeline.NewPutPipelineFunc(tp), - Simulate: ingest_simulate.NewSimulateFunc(tp), + DeleteGeoipDatabase: ingest_delete_geoip_database.NewDeleteGeoipDatabaseFunc(tp), + DeleteIpLocationDatabase: ingest_delete_ip_location_database.NewDeleteIpLocationDatabaseFunc(tp), + DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp), + GeoIpStats: ingest_geo_ip_stats.NewGeoIpStatsFunc(tp), + GetGeoipDatabase: ingest_get_geoip_database.NewGetGeoipDatabaseFunc(tp), + GetIpLocationDatabase: ingest_get_ip_location_database.NewGetIpLocationDatabaseFunc(tp), + GetPipeline: ingest_get_pipeline.NewGetPipelineFunc(tp), + ProcessorGrok: ingest_processor_grok.NewProcessorGrokFunc(tp), + PutGeoipDatabase: ingest_put_geoip_database.NewPutGeoipDatabaseFunc(tp), + PutIpLocationDatabase: ingest_put_ip_location_database.NewPutIpLocationDatabaseFunc(tp), + PutPipeline: ingest_put_pipeline.NewPutPipelineFunc(tp), + Simulate: ingest_simulate.NewSimulateFunc(tp), }, // License @@ -4417,14 +10691,16 @@ func New(tp elastictransport.Interface) *API { // SearchApplication SearchApplication: SearchApplication{ - Delete: search_application_delete.NewDeleteFunc(tp), - DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), - Get: search_application_get.NewGetFunc(tp), - GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), - List: search_application_list.NewListFunc(tp), - Put: search_application_put.NewPutFunc(tp), - PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), - Search: 
search_application_search.NewSearchFunc(tp), + Delete: search_application_delete.NewDeleteFunc(tp), + DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), + Get: search_application_get.NewGetFunc(tp), + GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), + List: search_application_list.NewListFunc(tp), + PostBehavioralAnalyticsEvent: search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEventFunc(tp), + Put: search_application_put.NewPutFunc(tp), + PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), + RenderQuery: search_application_render_query.NewRenderQueryFunc(tp), + Search: search_application_search.NewSearchFunc(tp), }, // SearchableSnapshots @@ -4451,6 +10727,7 @@ func New(tp elastictransport.Interface) *API { CreateApiKey: security_create_api_key.NewCreateApiKeyFunc(tp), CreateCrossClusterApiKey: security_create_cross_cluster_api_key.NewCreateCrossClusterApiKeyFunc(tp), CreateServiceToken: security_create_service_token.NewCreateServiceTokenFunc(tp), + DelegatePki: security_delegate_pki.NewDelegatePkiFunc(tp), DeletePrivileges: security_delete_privileges.NewDeletePrivilegesFunc(tp), DeleteRole: security_delete_role.NewDeleteRoleFunc(tp), DeleteRoleMapping: security_delete_role_mapping.NewDeleteRoleMappingFunc(tp), @@ -4509,6 +10786,11 @@ func New(tp elastictransport.Interface) *API { PutNode: shutdown_put_node.NewPutNodeFunc(tp), }, + // Simulate + Simulate: Simulate{ + Ingest: simulate_ingest.NewIngestFunc(tp), + }, + // Slm Slm: Slm{ DeleteLifecycle: slm_delete_lifecycle.NewDeleteLifecycleFunc(tp), @@ -4532,6 +10814,7 @@ func New(tp elastictransport.Interface) *API { DeleteRepository: snapshot_delete_repository.NewDeleteRepositoryFunc(tp), Get: snapshot_get.NewGetFunc(tp), GetRepository: snapshot_get_repository.NewGetRepositoryFunc(tp), + RepositoryAnalyze: 
snapshot_repository_analyze.NewRepositoryAnalyzeFunc(tp), RepositoryVerifyIntegrity: snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrityFunc(tp), Restore: snapshot_restore.NewRestoreFunc(tp), Status: snapshot_status.NewStatusFunc(tp), diff --git a/typedapi/asyncsearch/delete/delete.go b/typedapi/asyncsearch/delete/delete.go index f636184d2a..6c77208914 100644 --- a/typedapi/asyncsearch/delete/delete.go +++ b/typedapi/asyncsearch/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an async search. // @@ -92,7 +92,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { // the original search request; users that have the `cancel_task` cluster // privilege. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, diff --git a/typedapi/asyncsearch/delete/response.go b/typedapi/asyncsearch/delete/response.go index b63ec552c8..bd0fb5e870 100644 --- a/typedapi/asyncsearch/delete/response.go +++ b/typedapi/asyncsearch/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/asyncsearch/get/get.go b/typedapi/asyncsearch/get/get.go index 30eb6f9854..53b6323e66 100644 --- a/typedapi/asyncsearch/get/get.go +++ b/typedapi/asyncsearch/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get async search results. // @@ -88,7 +88,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { // a specific async search is restricted to the user or API key that submitted // it. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -309,7 +309,7 @@ func (r *Get) _id(id string) *Get { return r } -// KeepAlive Specifies how long the async search should be available in the cluster. +// KeepAlive The length of time that the async search should be available in the cluster. 
// When not specified, the `keep_alive` set with the corresponding submit async // request will be used. // Otherwise, it is possible to override the value and extend the validity of diff --git a/typedapi/asyncsearch/get/response.go b/typedapi/asyncsearch/get/response.go index 6f1f9897d8..6a8536b7d1 100644 --- a/typedapi/asyncsearch/get/response.go +++ b/typedapi/asyncsearch/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -33,11 +33,11 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 type Response struct { - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -49,8 +49,10 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. 
- // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. IsRunning bool `json:"is_running"` Response types.AsyncSearch `json:"response"` diff --git a/typedapi/asyncsearch/status/response.go b/typedapi/asyncsearch/status/response.go index 62bf54596a..7d4685cd83 100644 --- a/typedapi/asyncsearch/status/response.go +++ b/typedapi/asyncsearch/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package status @@ -33,19 +33,20 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L41 type Response struct { // Clusters_ Metadata about clusters involved in the cross-cluster search. - // Not shown for local-only searches. + // It is not shown for local-only searches. Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` // CompletionStatus If the async search completed, this field shows the status code of the // search. - // For example, 200 indicates that the async search was successfully completed. - // 503 indicates that the async search was completed with an error. 
+ // For example, `200` indicates that the async search was successfully + // completed. + // `503` indicates that the async search was completed with an error. CompletionStatus *int `json:"completion_status,omitempty"` - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -57,11 +58,13 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. - // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. IsRunning bool `json:"is_running"` - // Shards_ Indicates how many shards have run the query so far. + // Shards_ The number of shards that have run the query so far. Shards_ types.ShardStatistics `json:"_shards"` StartTime types.DateTime `json:"start_time,omitempty"` StartTimeInMillis int64 `json:"start_time_in_millis"` diff --git a/typedapi/asyncsearch/status/status.go b/typedapi/asyncsearch/status/status.go index 126d9e9d38..25640f444e 100644 --- a/typedapi/asyncsearch/status/status.go +++ b/typedapi/asyncsearch/status/status.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the async search status. // // Get the status of a previously submitted async search request given its // identifier, without retrieving search results. -// If the Elasticsearch security features are enabled, use of this API is -// restricted to the `monitoring_user` role. +// If the Elasticsearch security features are enabled, the access to the status +// of a specific async search is restricted to: +// +// * The user or API key that submitted the original async search request. +// * Users that have the `monitor` cluster privilege or greater privileges. package status import ( @@ -85,10 +88,13 @@ func NewStatusFunc(tp elastictransport.Interface) NewStatus { // // Get the status of a previously submitted async search request given its // identifier, without retrieving search results. -// If the Elasticsearch security features are enabled, use of this API is -// restricted to the `monitoring_user` role. +// If the Elasticsearch security features are enabled, the access to the status +// of a specific async search is restricted to: +// +// * The user or API key that submitted the original async search request. +// * Users that have the `monitor` cluster privilege or greater privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit func New(tp elastictransport.Interface) *Status { r := &Status{ transport: tp, @@ -309,6 +315,16 @@ func (r *Status) _id(id string) *Status { return r } +// KeepAlive The length of time that the async search needs to be available. +// Ongoing async searches and any saved search results are deleted after this +// period. 
+// API name: keep_alive +func (r *Status) KeepAlive(duration string) *Status { + r.values.Set("keep_alive", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/asyncsearch/submit/request.go b/typedapi/asyncsearch/submit/request.go index 64a6d82420..122aad6ea0 100644 --- a/typedapi/asyncsearch/submit/request.go +++ b/typedapi/asyncsearch/submit/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package submit @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L55-L290 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L54-L294 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` diff --git a/typedapi/asyncsearch/submit/response.go b/typedapi/asyncsearch/submit/response.go index f095a81727..7c7d38a7c1 100644 --- a/typedapi/asyncsearch/submit/response.go +++ b/typedapi/asyncsearch/submit/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package submit @@ -33,11 +33,11 @@ import ( // Response holds the response body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 type Response struct { - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -49,8 +49,10 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. - // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. 
IsRunning bool `json:"is_running"` Response types.AsyncSearch `json:"response"` diff --git a/typedapi/asyncsearch/submit/submit.go b/typedapi/asyncsearch/submit/submit.go index ddee38cfa0..e4641daa00 100644 --- a/typedapi/asyncsearch/submit/submit.go +++ b/typedapi/asyncsearch/submit/submit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run an async search. // @@ -109,7 +109,7 @@ func NewSubmitFunc(tp elastictransport.Interface) NewSubmit { // The maximum allowed size for a stored async search response can be set by // changing the `search.max_async_search_response_size` cluster level setting. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit func New(tp elastictransport.Interface) *Submit { r := &Submit{ transport: tp, @@ -117,8 +117,6 @@ func New(tp elastictransport.Interface) *Submit { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -357,15 +355,6 @@ func (r *Submit) WaitForCompletionTimeout(duration string) *Submit { return r } -// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes -// within the `wait_for_completion_timeout`. -// API name: keep_on_completion -func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { - r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) - - return r -} - // KeepAlive Specifies how long the async search needs to be available. // Ongoing async searches and any saved search results are deleted after this // period. 
@@ -376,6 +365,15 @@ func (r *Submit) KeepAlive(duration string) *Submit { return r } +// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes +// within the `wait_for_completion_timeout`. +// API name: keep_on_completion +func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { + r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) + + return r +} + // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. (This includes `_all` string or when no indices have been specified) // API name: allow_no_indices @@ -497,13 +495,6 @@ func (r *Submit) MaxConcurrentShardRequests(maxconcurrentshardrequests string) * return r } -// API name: min_compatible_shard_node -func (r *Submit) MinCompatibleShardNode(versionstring string) *Submit { - r.values.Set("min_compatible_shard_node", versionstring) - - return r -} - // Preference Specify the node or shard the operation should be performed on (default: // random) // API name: preference @@ -513,16 +504,6 @@ func (r *Submit) Preference(preference string) *Submit { return r } -// PreFilterShardSize The default value cannot be changed, which enforces the execution of a -// pre-filter roundtrip to retrieve statistics from each shard so that the ones -// that surely don’t hold any document matching the query get skipped. 
-// API name: pre_filter_shard_size -func (r *Submit) PreFilterShardSize(prefiltershardsize string) *Submit { - r.values.Set("pre_filter_shard_size", prefiltershardsize) - - return r -} - // RequestCache Specify if request cache should be used for this request or not, defaults to // true // API name: request_cache @@ -540,13 +521,6 @@ func (r *Submit) Routing(routing string) *Submit { return r } -// API name: scroll -func (r *Submit) Scroll(duration string) *Submit { - r.values.Set("scroll", duration) - - return r -} - // SearchType Search operation type // API name: search_type func (r *Submit) SearchType(searchtype searchtype.SearchType) *Submit { @@ -596,6 +570,8 @@ func (r *Submit) TypedKeys(typedkeys bool) *Submit { return r } +// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in +// the rest search response // API name: rest_total_hits_as_int func (r *Submit) RestTotalHitsAsInt(resttotalhitsasint bool) *Submit { r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) @@ -673,221 +649,401 @@ func (r *Submit) Pretty(pretty bool) *Submit { // API name: aggregations func (r *Submit) Aggregations(aggregations map[string]types.Aggregations) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *Submit) AddAggregation(key string, value types.AggregationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } // API name: collapse -func (r *Submit) Collapse(collapse *types.FieldCollapse) *Submit { +func (r *Submit) Collapse(collapse 
types.FieldCollapseVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// Array of wildcard (*) patterns. The request returns doc values for field // names matching these patterns in the hits.fields property of the response. // API name: docvalue_fields -func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Submit { - r.req.DocvalueFields = docvaluefields +func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + + } return r } -// Explain If true, returns detailed information about score computation as part of a +// If true, returns detailed information about score computation as part of a // hit. // API name: explain func (r *Submit) Explain(explain bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. 
// API name: ext func (r *Submit) Ext(ext map[string]json.RawMessage) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} + +func (r *Submit) AddExt(key string, value json.RawMessage) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + + tmp[key] = value + r.req.Ext = tmp return r } -// Fields Array of wildcard (*) patterns. The request returns values for field names +// Array of wildcard (*) patterns. The request returns values for field names // matching these patterns in the hits.fields property of the response. // API name: fields -func (r *Submit) Fields(fields ...types.FieldAndFormat) *Submit { - r.req.Fields = fields +func (r *Submit) Fields(fields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + } return r } -// From Starting document offset. By default, you cannot page through more than +// Starting document offset. By default, you cannot page through more than // 10,000 // hits using the from and size parameters. To page through more hits, use the // search_after parameter. 
// API name: from func (r *Submit) From(from int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } // API name: highlight -func (r *Submit) Highlight(highlight *types.Highlight) *Submit { +func (r *Submit) Highlight(highlight types.HighlightVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boosts the _score of documents from specified indices. // API name: indices_boost -func (r *Submit) IndicesBoost(indicesboosts ...map[string]types.Float64) *Submit { - r.req.IndicesBoost = indicesboosts +func (r *Submit) IndicesBoost(indicesboost []map[string]types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// Knn Defines the approximate kNN search to run. +// Defines the approximate kNN search to run. // API name: knn -func (r *Submit) Knn(knns ...types.KnnSearch) *Submit { - r.req.Knn = knns +func (r *Submit) Knn(knns ...types.KnnSearchVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + r.req.Knn[i] = *v.KnnSearchCaster() + } return r } -// MinScore Minimum _score for matching documents. Documents with a lower _score are +// Minimum _score for matching documents. Documents with a lower _score are // not included in the search results. 
// API name: min_score func (r *Submit) MinScore(minscore types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// Limits the search to a point in time (PIT). If you provide a PIT, you // cannot specify an in the request path. // API name: pit -func (r *Submit) Pit(pit *types.PointInTimeReference) *Submit { +func (r *Submit) Pit(pit types.PointInTimeReferenceVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } // API name: post_filter -func (r *Submit) PostFilter(postfilter *types.Query) *Submit { +func (r *Submit) PostFilter(postfilter types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } // API name: profile func (r *Submit) Profile(profile bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. +// Defines the search definition using the Query DSL. 
// API name: query -func (r *Submit) Query(query *types.Query) *Submit { +func (r *Submit) Query(query types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } // API name: rescore -func (r *Submit) Rescore(rescores ...types.Rescore) *Submit { - r.req.Rescore = rescores +func (r *Submit) Rescore(rescores ...types.RescoreVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. // API name: runtime_mappings -func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFields) *Submit { - r.req.RuntimeMappings = runtimefields +func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. 
// API name: script_fields func (r *Submit) ScriptFields(scriptfields map[string]types.ScriptField) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *Submit) AddScriptField(key string, value types.ScriptFieldVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp return r } // API name: search_after -func (r *Submit) SearchAfter(sortresults ...types.FieldValue) *Submit { - r.req.SearchAfter = sortresults +func (r *Submit) SearchAfter(sortresults ...types.FieldValueVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// If true, returns sequence number and primary term of the last modification // of each hit. See Optimistic concurrency control. // API name: seq_no_primary_term func (r *Submit) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. By default, you cannot page through more +// The number of hits to return. By default, you cannot page through more // than 10,000 hits using the from and size parameters. To page through more // hits, use the search_after parameter. 
// API name: size func (r *Submit) Size(size int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } // API name: slice -func (r *Submit) Slice(slice *types.SlicedScroll) *Submit { +func (r *Submit) Slice(slice types.SlicedScrollVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } // API name: sort -func (r *Submit) Sort(sorts ...types.SortCombinations) *Submit { - r.req.Sort = sorts +func (r *Submit) Sort(sorts ...types.SortCombinationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. These +// Indicates which source fields are returned for matching documents. These // fields are returned in the hits._source property of the search response. // API name: _source -func (r *Submit) Source_(sourceconfig types.SourceConfig) *Submit { - r.req.Source_ = sourceconfig +func (r *Submit) Source_(sourceconfig types.SourceConfigVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. Each group maintains a statistics +// Stats groups to associate with the search. Each group maintains a statistics // aggregation for its associated searches. You can retrieve these stats using // the indices stats API. 
// API name: stats func (r *Submit) Stats(stats ...string) *Submit { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// List of stored fields to return as part of a hit. If no fields are specified, // no stored fields are included in the response. If this field is specified, // the _source // parameter defaults to false. You can pass _source: true to return both source @@ -895,20 +1051,29 @@ func (r *Submit) Stats(stats ...string) *Submit { // and stored fields in the search response. // API name: stored_fields func (r *Submit) StoredFields(fields ...string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } // API name: suggest -func (r *Submit) Suggest(suggest *types.Suggester) *Submit { +func (r *Submit) Suggest(suggest types.SuggesterVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// Maximum number of documents to collect for each shard. If a query reaches // this // limit, Elasticsearch terminates the query early. Elasticsearch collects // documents @@ -916,48 +1081,71 @@ func (r *Submit) Suggest(suggest *types.Suggester) *Submit { // early. 
// API name: terminate_after func (r *Submit) TerminateAfter(terminateafter int64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. If no +// Specifies the period of time to wait for a response from each shard. If no // response // is received before the timeout expires, the request fails and returns an // error. // Defaults to no timeout. // API name: timeout func (r *Submit) Timeout(timeout string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If true, calculate and return document scores, even if the scores are not // used for sorting. // API name: track_scores func (r *Submit) TrackScores(trackscores bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// Number of hits matching the query to count accurately. If true, the exact // number of hits is returned at the cost of some performance. If false, the // response does not include the total number of hits matching the query. // Defaults to 10,000 hits. // API name: track_total_hits -func (r *Submit) TrackTotalHits(trackhits types.TrackHits) *Submit { - r.req.TrackTotalHits = trackhits +func (r *Submit) TrackTotalHits(trackhits types.TrackHitsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. 
+// If true, returns document version as part of a hit. // API name: version func (r *Submit) Version(version bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go b/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go index 1c095ac26c..199a433186 100644 --- a/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go +++ b/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an autoscaling policy. // @@ -86,7 +86,7 @@ func NewDeleteAutoscalingPolicyFunc(tp elastictransport.Interface) NewDeleteAuto // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy func New(tp elastictransport.Interface) *DeleteAutoscalingPolicy { r := &DeleteAutoscalingPolicy{ transport: tp, @@ -307,6 +307,25 @@ func (r *DeleteAutoscalingPolicy) _name(name string) *DeleteAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout +func (r *DeleteAutoscalingPolicy) MasterTimeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteAutoscalingPolicy) Timeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/autoscaling/deleteautoscalingpolicy/response.go b/typedapi/autoscaling/deleteautoscalingpolicy/response.go index 3437e0835c..34094e3db1 100644 --- a/typedapi/autoscaling/deleteautoscalingpolicy/response.go +++ b/typedapi/autoscaling/deleteautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteautoscalingpolicy // Response holds the response body struct for the package deleteautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go b/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go index 6833c9a60d..6e6830755d 100644 --- a/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go +++ b/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the autoscaling capacity. // @@ -114,7 +114,7 @@ func NewGetAutoscalingCapacityFunc(tp elastictransport.Interface) NewGetAutoscal // This information is provided for diagnosis only. // Do not use this information to make autoscaling decisions. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity func New(tp elastictransport.Interface) *GetAutoscalingCapacity { r := &GetAutoscalingCapacity{ transport: tp, @@ -320,6 +320,16 @@ func (r *GetAutoscalingCapacity) Header(key, value string) *GetAutoscalingCapaci return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingCapacity) MasterTimeout(duration string) *GetAutoscalingCapacity { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/autoscaling/getautoscalingcapacity/response.go b/typedapi/autoscaling/getautoscalingcapacity/response.go index 341b4c7d3f..deb4fb6b4a 100644 --- a/typedapi/autoscaling/getautoscalingcapacity/response.go +++ b/typedapi/autoscaling/getautoscalingcapacity/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getautoscalingcapacity @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautoscalingcapacity // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 type Response struct { Policies map[string]types.AutoscalingDeciders `json:"policies"` } diff --git a/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go b/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go index 0177b638fe..f7338dd6d2 100644 --- a/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go +++ b/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get an autoscaling policy. 
// @@ -86,7 +86,7 @@ func NewGetAutoscalingPolicyFunc(tp elastictransport.Interface) NewGetAutoscalin // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity func New(tp elastictransport.Interface) *GetAutoscalingPolicy { r := &GetAutoscalingPolicy{ transport: tp, @@ -307,6 +307,16 @@ func (r *GetAutoscalingPolicy) _name(name string) *GetAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingPolicy) MasterTimeout(duration string) *GetAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/autoscaling/getautoscalingpolicy/response.go b/typedapi/autoscaling/getautoscalingpolicy/response.go index c0a288e8ca..135d6de60c 100644 --- a/typedapi/autoscaling/getautoscalingpolicy/response.go +++ b/typedapi/autoscaling/getautoscalingpolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getautoscalingpolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 type Response struct { // Deciders Decider settings. diff --git a/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go b/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go index 293e816b6f..ca94ae81fb 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go +++ b/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update an autoscaling policy. // @@ -91,7 +91,7 @@ func NewPutAutoscalingPolicyFunc(tp elastictransport.Interface) NewPutAutoscalin // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy func New(tp elastictransport.Interface) *PutAutoscalingPolicy { r := &PutAutoscalingPolicy{ transport: tp, @@ -99,8 +99,6 @@ func New(tp elastictransport.Interface) *PutAutoscalingPolicy { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -323,6 +321,25 @@ func (r *PutAutoscalingPolicy) _name(name string) *PutAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutAutoscalingPolicy) MasterTimeout(duration string) *PutAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutAutoscalingPolicy) Timeout(duration string) *PutAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -367,18 +384,46 @@ func (r *PutAutoscalingPolicy) Pretty(pretty bool) *PutAutoscalingPolicy { return r } -// Deciders Decider settings. +// Decider settings. 
// API name: deciders func (r *PutAutoscalingPolicy) Deciders(deciders map[string]json.RawMessage) *PutAutoscalingPolicy { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Deciders = deciders + return r +} + +func (r *PutAutoscalingPolicy) AddDecider(key string, value json.RawMessage) *PutAutoscalingPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.Deciders == nil { + r.req.Deciders = make(map[string]json.RawMessage) + } else { + tmp = r.req.Deciders + } + + tmp[key] = value + + r.req.Deciders = tmp return r } // API name: roles func (r *PutAutoscalingPolicy) Roles(roles ...string) *PutAutoscalingPolicy { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + r.req.Roles = append(r.req.Roles, v) + + } return r } diff --git a/typedapi/autoscaling/putautoscalingpolicy/request.go b/typedapi/autoscaling/putautoscalingpolicy/request.go index 791f76d267..a7a9576aa4 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/request.go +++ b/typedapi/autoscaling/putautoscalingpolicy/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putautoscalingpolicy @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L24-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L25-L57 type Request = types.AutoscalingPolicy // NewRequest returns a Request diff --git a/typedapi/autoscaling/putautoscalingpolicy/response.go b/typedapi/autoscaling/putautoscalingpolicy/response.go index 54d06c438f..f9d346df8f 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/response.go +++ b/typedapi/autoscaling/putautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putautoscalingpolicy // Response holds the response body struct for the package putautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/capabilities/capabilities.go b/typedapi/capabilities/capabilities.go index e9077abad2..a2ead259d1 100644 --- a/typedapi/capabilities/capabilities.go +++ b/typedapi/capabilities/capabilities.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Checks if the specified combination of method, API, parameters, and arbitrary // capabilities are supported diff --git a/typedapi/cat/aliases/aliases.go b/typedapi/cat/aliases/aliases.go index 587437693a..6662df549f 100644 --- a/typedapi/cat/aliases/aliases.go +++ b/typedapi/cat/aliases/aliases.go @@ -16,16 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get aliases. -// Retrieves the cluster’s index aliases, including filter and routing -// information. -// The API does not return data stream aliases. // -// CAT APIs are only intended for human consumption using the command line or -// the Kibana console. They are not intended for use by applications. For -// application consumption, use the aliases API. +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. 
package aliases import ( @@ -83,15 +83,15 @@ func NewAliasesFunc(tp elastictransport.Interface) NewAliases { } // Get aliases. -// Retrieves the cluster’s index aliases, including filter and routing -// information. -// The API does not return data stream aliases. // -// CAT APIs are only intended for human consumption using the command line or -// the Kibana console. They are not intended for use by applications. For -// application consumption, use the aliases API. +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases func New(tp elastictransport.Interface) *Aliases { r := &Aliases{ transport: tp, @@ -320,8 +320,28 @@ func (r *Aliases) Name(name string) *Aliases { return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Aliases) H(names ...string) *Aliases { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Aliases) S(names ...string) *Aliases { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. 
+// It supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Aliases { tmp := []string{} @@ -333,6 +353,17 @@ func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildca return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicated that the request should never timeout, you can set it to `-1`. +// API name: master_timeout +func (r *Aliases) MasterTimeout(duration string) *Aliases { + r.values.Set("master_timeout", duration) + + return r +} + // Format Specifies the format to return the columnar data in, can be set to // `text`, `json`, `cbor`, `yaml`, or `smile`. // API name: format @@ -342,14 +373,6 @@ func (r *Aliases) Format(format string) *Aliases { return r } -// H List of columns to appear in the response. Supports simple wildcards. -// API name: h -func (r *Aliases) H(names ...string) *Aliases { - r.values.Set("h", strings.Join(names, ",")) - - return r -} - // Help When set to `true` will output available columns. This option // can't be combined with any other query string option. // API name: help @@ -359,35 +382,6 @@ func (r *Aliases) Help(help bool) *Aliases { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Aliases) Local(local bool) *Aliases { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *Aliases) MasterTimeout(duration string) *Aliases { - r.values.Set("master_timeout", duration) - - return r -} - -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Aliases) S(names ...string) *Aliases { - r.values.Set("s", strings.Join(names, ",")) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *Aliases) V(v bool) *Aliases { diff --git a/typedapi/cat/aliases/response.go b/typedapi/cat/aliases/response.go index 91a85f102a..e79d2bcd00 100644 --- a/typedapi/cat/aliases/response.go +++ b/typedapi/cat/aliases/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package aliases @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package aliases // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 type Response []types.AliasesRecord diff --git a/typedapi/cat/allocation/allocation.go b/typedapi/cat/allocation/allocation.go index 085e5965f8..1a7c738e40 100644 --- a/typedapi/cat/allocation/allocation.go +++ b/typedapi/cat/allocation/allocation.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Provides a snapshot of the number of shards allocated to each data node and -// their disk space. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. package allocation @@ -78,12 +81,15 @@ func NewAllocationFunc(tp elastictransport.Interface) NewAllocation { } } -// Provides a snapshot of the number of shards allocated to each data node and -// their disk space. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation func New(tp elastictransport.Interface) *Allocation { r := &Allocation{ transport: tp, @@ -302,8 +308,8 @@ func (r *Allocation) Header(key, value string) *Allocation { return r } -// NodeId Comma-separated list of node identifiers or names used to limit the returned -// information. +// NodeId A comma-separated list of node identifiers or names used to limit the +// returned information. 
// API Name: nodeid func (r *Allocation) NodeId(nodeid string) *Allocation { r.paramSet |= nodeidMask @@ -320,15 +326,6 @@ func (r *Allocation) Bytes(bytes bytes.Bytes) *Allocation { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Allocation) Format(format string) *Allocation { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Allocation) H(names ...string) *Allocation { @@ -337,11 +334,12 @@ func (r *Allocation) H(names ...string) *Allocation { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Allocation) Help(help bool) *Allocation { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Allocation) S(names ...string) *Allocation { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -365,12 +363,20 @@ func (r *Allocation) MasterTimeout(duration string) *Allocation { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Allocation) S(names ...string) *Allocation { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Allocation) Format(format string) *Allocation { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. 
This option +// can't be combined with any other query string option. +// API name: help +func (r *Allocation) Help(help bool) *Allocation { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/allocation/response.go b/typedapi/cat/allocation/response.go index 010cb0bbaf..73cba10324 100644 --- a/typedapi/cat/allocation/response.go +++ b/typedapi/cat/allocation/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package allocation @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package allocation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 type Response []types.AllocationRecord diff --git a/typedapi/cat/componenttemplates/component_templates.go b/typedapi/cat/componenttemplates/component_templates.go index bd30313a8e..b858c6321e 100644 --- a/typedapi/cat/componenttemplates/component_templates.go +++ b/typedapi/cat/componenttemplates/component_templates.go @@ -16,15 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get component templates. -// Returns information about component templates in a cluster. +// +// Get information about component templates in a cluster. 
// Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. package componenttemplates @@ -83,16 +84,17 @@ func NewComponentTemplatesFunc(tp elastictransport.Interface) NewComponentTempla } // Get component templates. -// Returns information about component templates in a cluster. +// +// Get information about component templates in a cluster. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-component-templates.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates func New(tp elastictransport.Interface) *ComponentTemplates { r := &ComponentTemplates{ transport: tp, @@ -311,8 +313,9 @@ func (r *ComponentTemplates) Header(key, value string) *ComponentTemplates { return r } -// Name The name of the component template. Accepts wildcard expressions. If omitted, -// all component templates are returned. +// Name The name of the component template. +// It accepts wildcard expressions. +// If it is omitted, all component templates are returned. 
// API Name: name func (r *ComponentTemplates) Name(name string) *ComponentTemplates { r.paramSet |= nameMask @@ -321,15 +324,6 @@ func (r *ComponentTemplates) Name(name string) *ComponentTemplates { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *ComponentTemplates) Format(format string) *ComponentTemplates { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *ComponentTemplates) H(names ...string) *ComponentTemplates { @@ -338,11 +332,12 @@ func (r *ComponentTemplates) H(names ...string) *ComponentTemplates { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *ComponentTemplates) Help(help bool) *ComponentTemplates { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *ComponentTemplates) S(names ...string) *ComponentTemplates { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -358,7 +353,7 @@ func (r *ComponentTemplates) Local(local bool) *ComponentTemplates { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // API name: master_timeout func (r *ComponentTemplates) MasterTimeout(duration string) *ComponentTemplates { r.values.Set("master_timeout", duration) @@ -366,12 +361,20 @@ func (r *ComponentTemplates) MasterTimeout(duration string) *ComponentTemplates return r } -// S List of columns that determine how the table should be sorted. 
-// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *ComponentTemplates) S(names ...string) *ComponentTemplates { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *ComponentTemplates) Format(format string) *ComponentTemplates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *ComponentTemplates) Help(help bool) *ComponentTemplates { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/componenttemplates/response.go b/typedapi/cat/componenttemplates/response.go index 0cdc248784..5dba8fccc6 100644 --- a/typedapi/cat/componenttemplates/response.go +++ b/typedapi/cat/componenttemplates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package componenttemplates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package componenttemplates // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 type Response []types.CatComponentTemplate diff --git a/typedapi/cat/count/count.go b/typedapi/cat/count/count.go index 1ecddac3b6..75f16ee128 100644 --- a/typedapi/cat/count/count.go +++ b/typedapi/cat/count/count.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a document count. -// Provides quick access to a document count for a data stream, an index, or an +// +// Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. 
package count @@ -84,17 +85,18 @@ func NewCountFunc(tp elastictransport.Interface) NewCount { } // Get a document count. -// Provides quick access to a document count for a data stream, an index, or an +// +// Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count func New(tp elastictransport.Interface) *Count { r := &Count{ transport: tp, @@ -313,10 +315,11 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index Comma-separated list of data streams, indices, and aliases used to limit the -// request. -// Supports wildcards (`*`). To target all data streams and indices, omit this -// parameter or use `*` or `_all`. +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// It supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index func (r *Count) Index(index string) *Count { r.paramSet |= indexMask @@ -325,15 +328,6 @@ func (r *Count) Index(index string) *Count { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Count) Format(format string) *Count { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. 
Supports simple wildcards. // API name: h func (r *Count) H(names ...string) *Count { @@ -342,40 +336,30 @@ func (r *Count) H(names ...string) *Count { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Count) Help(help bool) *Count { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Count) Local(local bool) *Count { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Count) S(names ...string) *Count { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Count) MasterTimeout(duration string) *Count { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Count) Format(format string) *Count { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Count) S(names ...string) *Count { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. 
This option +// can't be combined with any other query string option. +// API name: help +func (r *Count) Help(help bool) *Count { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/count/response.go b/typedapi/cat/count/response.go index 43ca8447ad..46468ebb40 100644 --- a/typedapi/cat/count/response.go +++ b/typedapi/cat/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/count/CatCountResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/count/CatCountResponse.ts#L22-L24 type Response []types.CountRecord diff --git a/typedapi/cat/fielddata/fielddata.go b/typedapi/cat/fielddata/fielddata.go index 42cbed9774..cd8fe41370 100644 --- a/typedapi/cat/fielddata/fielddata.go +++ b/typedapi/cat/fielddata/fielddata.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the amount of heap memory currently used by the field data cache on -// every data node in the cluster. +// Get field data cache information. +// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. 
+// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, @@ -80,14 +83,17 @@ func NewFielddataFunc(tp elastictransport.Interface) NewFielddata { } } -// Returns the amount of heap memory currently used by the field data cache on -// every data node in the cluster. +// Get field data cache information. +// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the nodes stats API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata func New(tp elastictransport.Interface) *Fielddata { r := &Fielddata{ transport: tp, @@ -324,15 +330,6 @@ func (r *Fielddata) Bytes(bytes bytes.Bytes) *Fielddata { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Fielddata) Format(format string) *Fielddata { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Fielddata) H(names ...string) *Fielddata { @@ -341,40 +338,30 @@ func (r *Fielddata) H(names ...string) *Fielddata { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Fielddata) Help(help bool) *Fielddata { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. 
If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Fielddata) Local(local bool) *Fielddata { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Fielddata) S(names ...string) *Fielddata { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Fielddata) MasterTimeout(duration string) *Fielddata { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Fielddata) Format(format string) *Fielddata { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Fielddata) S(names ...string) *Fielddata { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Fielddata) Help(help bool) *Fielddata { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/fielddata/response.go b/typedapi/cat/fielddata/response.go index a626e56c6c..a7358cfbce 100644 --- a/typedapi/cat/fielddata/response.go +++ b/typedapi/cat/fielddata/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package fielddata @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fielddata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 type Response []types.FielddataRecord diff --git a/typedapi/cat/health/health.go b/typedapi/cat/health/health.go index c216951d5f..2b993444b8 100644 --- a/typedapi/cat/health/health.go +++ b/typedapi/cat/health/health.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the health status of a cluster, similar to the cluster health API. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get the cluster health status. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -83,8 +84,9 @@ func NewHealthFunc(tp elastictransport.Interface) NewHealth { } } -// Returns the health status of a cluster, similar to the cluster health API. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get the cluster health status. 
+// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -99,7 +101,7 @@ func NewHealthFunc(tp elastictransport.Interface) NewHealth { // You also can use the API to track the recovery of a large cluster over a // longer period of time. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health func New(tp elastictransport.Interface) *Health { r := &Health{ transport: tp, @@ -321,15 +323,6 @@ func (r *Health) Ts(ts bool) *Health { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Health) Format(format string) *Health { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Health) H(names ...string) *Health { @@ -338,40 +331,30 @@ func (r *Health) H(names ...string) *Health { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Health) Help(help bool) *Health { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Health) Local(local bool) *Health { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. 
+// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Health) S(names ...string) *Health { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Health) MasterTimeout(duration string) *Health { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Health) Format(format string) *Health { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Health) S(names ...string) *Health { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Health) Help(help bool) *Health { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/health/response.go b/typedapi/cat/health/response.go index 2fe1b3b08d..d05b4609ac 100644 --- a/typedapi/cat/health/response.go +++ b/typedapi/cat/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package health @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/health/CatHealthResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/health/CatHealthResponse.ts#L22-L24 type Response []types.HealthRecord diff --git a/typedapi/cat/help/help.go b/typedapi/cat/help/help.go index 51b870a540..a9f670cb94 100644 --- a/typedapi/cat/help/help.go +++ b/typedapi/cat/help/help.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get CAT help. -// Returns help for the CAT APIs. +// +// Get help for the CAT APIs. package help import ( @@ -30,7 +31,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -70,9 +70,10 @@ func NewHelpFunc(tp elastictransport.Interface) NewHelp { } // Get CAT help. -// Returns help for the CAT APIs. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html +// Get help for the CAT APIs. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat func New(tp elastictransport.Interface) *Help { r := &Help{ transport: tp, @@ -177,7 +178,7 @@ func (r Help) Perform(providedCtx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a help.Response -func (r Help) Do(providedCtx context.Context) (Response, error) { +func (r Help) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -200,7 +201,7 @@ func (r Help) Do(providedCtx context.Context) (Response, error) { defer res.Body.Close() if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(&response) + err = json.NewDecoder(res.Body).Decode(response) if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) @@ -275,110 +276,3 @@ func (r *Help) Header(key, value string) *Help { return r } - -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Help) Format(format string) *Help { - r.values.Set("format", format) - - return r -} - -// H List of columns to appear in the response. Supports simple wildcards. -// API name: h -func (r *Help) H(names ...string) *Help { - r.values.Set("h", strings.Join(names, ",")) - - return r -} - -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Help) Help(help bool) *Help { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. 
In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Help) Local(local bool) *Help { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Help) MasterTimeout(duration string) *Help { - r.values.Set("master_timeout", duration) - - return r -} - -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Help) S(names ...string) *Help { - r.values.Set("s", strings.Join(names, ",")) - - return r -} - -// V When set to `true` will enable verbose output. -// API name: v -func (r *Help) V(v bool) *Help { - r.values.Set("v", strconv.FormatBool(v)) - - return r -} - -// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors -// when they occur. -// API name: error_trace -func (r *Help) ErrorTrace(errortrace bool) *Help { - r.values.Set("error_trace", strconv.FormatBool(errortrace)) - - return r -} - -// FilterPath Comma-separated list of filters in dot notation which reduce the response -// returned by Elasticsearch. -// API name: filter_path -func (r *Help) FilterPath(filterpaths ...string) *Help { - tmp := []string{} - for _, item := range filterpaths { - tmp = append(tmp, fmt.Sprintf("%v", item)) - } - r.values.Set("filter_path", strings.Join(tmp, ",")) - - return r -} - -// Human When set to `true` will return statistics in a format suitable for humans. -// For example `"exists_time": "1h"` for humans and -// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human -// readable values will be omitted. This makes sense for responses being -// consumed -// only by machines. 
-// API name: human -func (r *Help) Human(human bool) *Help { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - -// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use -// this option for debugging only. -// API name: pretty -func (r *Help) Pretty(pretty bool) *Help { - r.values.Set("pretty", strconv.FormatBool(pretty)) - - return r -} diff --git a/typedapi/cat/help/response.go b/typedapi/cat/help/response.go index 33a815c47b..cfa43905b4 100644 --- a/typedapi/cat/help/response.go +++ b/typedapi/cat/help/response.go @@ -16,22 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package help -import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types" -) - // Response holds the response body struct for the package help // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/help/CatHelpResponse.ts#L22-L24 - -type Response []types.HelpRecord +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/help/CatHelpResponse.ts#L20-L25 +type Response struct { +} // NewResponse returns a Response -func NewResponse() Response { - r := Response{} +func NewResponse() *Response { + r := &Response{} return r } diff --git a/typedapi/cat/indices/indices.go b/typedapi/cat/indices/indices.go index 1b6a8d5a8f..9675568991 100644 --- a/typedapi/cat/indices/indices.go +++ b/typedapi/cat/indices/indices.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get index information. -// Returns high-level information about indices in a cluster, including backing +// +// Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -100,7 +101,8 @@ func NewIndicesFunc(tp elastictransport.Interface) NewIndices { } // Get index information. -// Returns high-level information about indices in a cluster, including backing +// +// Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -122,7 +124,7 @@ func NewIndicesFunc(tp elastictransport.Interface) NewIndices { // They are not intended for use by applications. For application consumption, // use an index endpoint. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices func New(tp elastictransport.Interface) *Indices { r := &Indices{ transport: tp, @@ -407,11 +409,10 @@ func (r *Indices) Time(time timeunit.TimeUnit) *Indices { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Indices) Format(format string) *Indices { - r.values.Set("format", format) +// MasterTimeout Period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *Indices) MasterTimeout(duration string) *Indices { + r.values.Set("master_timeout", duration) return r } @@ -424,40 +425,30 @@ func (r *Indices) H(names ...string) *Indices { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Indices) Help(help bool) *Indices { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Indices) Local(local bool) *Indices { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Indices) S(names ...string) *Indices { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Indices) MasterTimeout(duration string) *Indices { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Indices) Format(format string) *Indices { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *Indices) S(names ...string) *Indices { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Indices) Help(help bool) *Indices { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/indices/response.go b/typedapi/cat/indices/response.go index e85eb2a840..f3d856c506 100644 --- a/typedapi/cat/indices/response.go +++ b/typedapi/cat/indices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package indices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package indices // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/indices/CatIndicesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/indices/CatIndicesResponse.ts#L22-L24 type Response []types.IndicesRecord diff --git a/typedapi/cat/master/master.go b/typedapi/cat/master/master.go index 61ab1a9726..9d28f7d7f2 100644 --- a/typedapi/cat/master/master.go +++ b/typedapi/cat/master/master.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about the master node, including the ID, bound IP -// address, and name. +// Get master node information. 
+// +// Get information about the master node, including the ID, bound IP address, +// and name. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -72,13 +75,16 @@ func NewMasterFunc(tp elastictransport.Interface) NewMaster { } } -// Returns information about the master node, including the ID, bound IP -// address, and name. +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master func New(tp elastictransport.Interface) *Master { r := &Master{ transport: tp, @@ -284,15 +290,6 @@ func (r *Master) Header(key, value string) *Master { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Master) Format(format string) *Master { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Master) H(names ...string) *Master { @@ -301,11 +298,12 @@ func (r *Master) H(names ...string) *Master { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Master) Help(help bool) *Master { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. 
+// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Master) S(names ...string) *Master { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -329,12 +327,20 @@ func (r *Master) MasterTimeout(duration string) *Master { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Master) S(names ...string) *Master { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Master) Format(format string) *Master { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Master) Help(help bool) *Master { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/master/response.go b/typedapi/cat/master/response.go index 66b2fbcd58..d278d707e3 100644 --- a/typedapi/cat/master/response.go +++ b/typedapi/cat/master/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package master @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package master // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/master/CatMasterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/master/CatMasterResponse.ts#L22-L24 type Response []types.MasterRecord diff --git a/typedapi/cat/mldatafeeds/ml_datafeeds.go b/typedapi/cat/mldatafeeds/ml_datafeeds.go index 13dbfa122b..899d31df8c 100644 --- a/typedapi/cat/mldatafeeds/ml_datafeeds.go +++ b/typedapi/cat/mldatafeeds/ml_datafeeds.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get datafeeds. -// Returns configuration and usage information about datafeeds. +// +// Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. 
package mldatafeeds @@ -86,17 +87,18 @@ func NewMlDatafeedsFunc(tp elastictransport.Interface) NewMlDatafeeds { } // Get datafeeds. -// Returns configuration and usage information about datafeeds. +// +// Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds func New(tp elastictransport.Interface) *MlDatafeeds { r := &MlDatafeeds{ transport: tp, @@ -397,25 +399,6 @@ func (r *MlDatafeeds) Help(help bool) *MlDatafeeds { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlDatafeeds) Local(local bool) *MlDatafeeds { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlDatafeeds) MasterTimeout(duration string) *MlDatafeeds { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. 
// API name: v func (r *MlDatafeeds) V(v bool) *MlDatafeeds { diff --git a/typedapi/cat/mldatafeeds/response.go b/typedapi/cat/mldatafeeds/response.go index 2f79a08f24..759ccedf37 100644 --- a/typedapi/cat/mldatafeeds/response.go +++ b/typedapi/cat/mldatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mldatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 type Response []types.DatafeedsRecord diff --git a/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go b/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go index e677e86945..f2cef2c2c9 100644 --- a/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go +++ b/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go @@ -16,12 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data frame analytics jobs. -// Returns configuration and usage information about data frame analytics jobs. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about data frame analytics jobs. 
+// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get data frame analytics jobs statistics // API. @@ -42,6 +43,7 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -83,14 +85,15 @@ func NewMlDataFrameAnalyticsFunc(tp elastictransport.Interface) NewMlDataFrameAn } // Get data frame analytics jobs. -// Returns configuration and usage information about data frame analytics jobs. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about data frame analytics jobs. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get data frame analytics jobs statistics // API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics func New(tp elastictransport.Interface) *MlDataFrameAnalytics { r := &MlDataFrameAnalytics{ transport: tp, @@ -370,8 +373,8 @@ func (r *MlDataFrameAnalytics) S(catdfacolumns ...catdfacolumn.CatDfaColumn) *Ml // Time Unit used to display time values. 
// API name: time -func (r *MlDataFrameAnalytics) Time(duration string) *MlDataFrameAnalytics { - r.values.Set("time", duration) +func (r *MlDataFrameAnalytics) Time(time timeunit.TimeUnit) *MlDataFrameAnalytics { + r.values.Set("time", time.String()) return r } @@ -394,25 +397,6 @@ func (r *MlDataFrameAnalytics) Help(help bool) *MlDataFrameAnalytics { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlDataFrameAnalytics) Local(local bool) *MlDataFrameAnalytics { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlDataFrameAnalytics) MasterTimeout(duration string) *MlDataFrameAnalytics { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlDataFrameAnalytics) V(v bool) *MlDataFrameAnalytics { diff --git a/typedapi/cat/mldataframeanalytics/response.go b/typedapi/cat/mldataframeanalytics/response.go index bdbdc2ab9a..394dbc1866 100644 --- a/typedapi/cat/mldataframeanalytics/response.go +++ b/typedapi/cat/mldataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mldataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 type Response []types.DataFrameAnalyticsRecord diff --git a/typedapi/cat/mljobs/ml_jobs.go b/typedapi/cat/mljobs/ml_jobs.go index f39d8f85ae..b92ff79791 100644 --- a/typedapi/cat/mljobs/ml_jobs.go +++ b/typedapi/cat/mljobs/ml_jobs.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection jobs. -// Returns configuration and usage information for anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. // This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. 
package mljobs @@ -87,17 +88,18 @@ func NewMlJobsFunc(tp elastictransport.Interface) NewMlJobs { } // Get anomaly detection jobs. -// Returns configuration and usage information for anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. // This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs func New(tp elastictransport.Interface) *MlJobs { r := &MlJobs{ transport: tp, @@ -406,25 +408,6 @@ func (r *MlJobs) Help(help bool) *MlJobs { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlJobs) Local(local bool) *MlJobs { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlJobs) MasterTimeout(duration string) *MlJobs { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. 
// API name: v func (r *MlJobs) V(v bool) *MlJobs { diff --git a/typedapi/cat/mljobs/response.go b/typedapi/cat/mljobs/response.go index 1756dd6e98..62262d7189 100644 --- a/typedapi/cat/mljobs/response.go +++ b/typedapi/cat/mljobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mljobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mljobs // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 type Response []types.JobsRecord diff --git a/typedapi/cat/mltrainedmodels/ml_trained_models.go b/typedapi/cat/mltrainedmodels/ml_trained_models.go index 30dcd32b5f..6c8cde9d4b 100644 --- a/typedapi/cat/mltrainedmodels/ml_trained_models.go +++ b/typedapi/cat/mltrainedmodels/ml_trained_models.go @@ -16,12 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get trained models. -// Returns configuration and usage information about inference trained models. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. 
They are not intended for use by applications. For // application consumption, use the get trained models statistics API. package mltrainedmodels @@ -41,6 +42,7 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -82,13 +84,14 @@ func NewMlTrainedModelsFunc(tp elastictransport.Interface) NewMlTrainedModels { } // Get trained models. -// Returns configuration and usage information about inference trained models. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get trained models statistics API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models func New(tp elastictransport.Interface) *MlTrainedModels { r := &MlTrainedModels{ transport: tp, @@ -383,6 +386,14 @@ func (r *MlTrainedModels) Size(size int) *MlTrainedModels { return r } +// Time Unit used to display time values. +// API name: time +func (r *MlTrainedModels) Time(time timeunit.TimeUnit) *MlTrainedModels { + r.values.Set("time", time.String()) + + return r +} + // Format Specifies the format to return the columnar data in, can be set to // `text`, `json`, `cbor`, `yaml`, or `smile`. // API name: format @@ -401,25 +412,6 @@ func (r *MlTrainedModels) Help(help bool) *MlTrainedModels { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. 
If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlTrainedModels) Local(local bool) *MlTrainedModels { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlTrainedModels) MasterTimeout(duration string) *MlTrainedModels { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlTrainedModels) V(v bool) *MlTrainedModels { diff --git a/typedapi/cat/mltrainedmodels/response.go b/typedapi/cat/mltrainedmodels/response.go index dff931ed40..d9aece1ea9 100644 --- a/typedapi/cat/mltrainedmodels/response.go +++ b/typedapi/cat/mltrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mltrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mltrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 type Response []types.TrainedModelsRecord diff --git a/typedapi/cat/nodeattrs/nodeattrs.go b/typedapi/cat/nodeattrs/nodeattrs.go index 9d25ba4b17..c849c93f3a 100644 --- a/typedapi/cat/nodeattrs/nodeattrs.go +++ b/typedapi/cat/nodeattrs/nodeattrs.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about custom node attributes. +// Get node attribute information. +// +// Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -71,12 +73,14 @@ func NewNodeattrsFunc(tp elastictransport.Interface) NewNodeattrs { } } -// Returns information about custom node attributes. +// Get node attribute information. +// +// Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. 
They are not intended for use by applications. For // application consumption, use the nodes info API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs func New(tp elastictransport.Interface) *Nodeattrs { r := &Nodeattrs{ transport: tp, @@ -282,15 +286,6 @@ func (r *Nodeattrs) Header(key, value string) *Nodeattrs { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Nodeattrs) Format(format string) *Nodeattrs { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Nodeattrs) H(names ...string) *Nodeattrs { @@ -299,11 +294,12 @@ func (r *Nodeattrs) H(names ...string) *Nodeattrs { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Nodeattrs) Help(help bool) *Nodeattrs { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Nodeattrs) S(names ...string) *Nodeattrs { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +323,20 @@ func (r *Nodeattrs) MasterTimeout(duration string) *Nodeattrs { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *Nodeattrs) S(names ...string) *Nodeattrs { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Nodeattrs) Format(format string) *Nodeattrs { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodeattrs) Help(help bool) *Nodeattrs { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/nodeattrs/response.go b/typedapi/cat/nodeattrs/response.go index ad63beba46..f60a4f92e7 100644 --- a/typedapi/cat/nodeattrs/response.go +++ b/typedapi/cat/nodeattrs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package nodeattrs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodeattrs // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 type Response []types.NodeAttributesRecord diff --git a/typedapi/cat/nodes/nodes.go b/typedapi/cat/nodes/nodes.go index 86b6a11d17..b6474e95c3 100644 --- a/typedapi/cat/nodes/nodes.go +++ b/typedapi/cat/nodes/nodes.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about the nodes in a cluster. +// Get node information. +// +// Get information about the nodes in a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -72,12 +75,14 @@ func NewNodesFunc(tp elastictransport.Interface) NewNodes { } } -// Returns information about the nodes in a cluster. +// Get node information. +// +// Get information about the nodes in a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes func New(tp elastictransport.Interface) *Nodes { r := &Nodes{ transport: tp, @@ -308,15 +313,6 @@ func (r *Nodes) IncludeUnloadedSegments(includeunloadedsegments bool) *Nodes { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. 
-// API name: format -func (r *Nodes) Format(format string) *Nodes { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Nodes) H(names ...string) *Nodes { @@ -325,22 +321,12 @@ func (r *Nodes) H(names ...string) *Nodes { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Nodes) Help(help bool) *Nodes { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Nodes) Local(local bool) *Nodes { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Nodes) S(names ...string) *Nodes { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -353,12 +339,28 @@ func (r *Nodes) MasterTimeout(duration string) *Nodes { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Nodes) S(names ...string) *Nodes { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Nodes) Time(time timeunit.TimeUnit) *Nodes { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. 
+// API name: format +func (r *Nodes) Format(format string) *Nodes { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodes) Help(help bool) *Nodes { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/nodes/response.go b/typedapi/cat/nodes/response.go index 27738767e8..35ab5b5f52 100644 --- a/typedapi/cat/nodes/response.go +++ b/typedapi/cat/nodes/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package nodes @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodes // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/nodes/CatNodesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/nodes/CatNodesResponse.ts#L22-L24 type Response []types.NodesRecord diff --git a/typedapi/cat/pendingtasks/pending_tasks.go b/typedapi/cat/pendingtasks/pending_tasks.go index b0b4808dfe..70cb2548c8 100644 --- a/typedapi/cat/pendingtasks/pending_tasks.go +++ b/typedapi/cat/pendingtasks/pending_tasks.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster-level changes that have not yet been executed. +// Get pending task information. 
+// +// Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. @@ -37,6 +39,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -71,12 +74,14 @@ func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { } } -// Returns cluster-level changes that have not yet been executed. +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks func New(tp elastictransport.Interface) *PendingTasks { r := &PendingTasks{ transport: tp, @@ -282,15 +287,6 @@ func (r *PendingTasks) Header(key, value string) *PendingTasks { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *PendingTasks) Format(format string) *PendingTasks { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. 
// API name: h func (r *PendingTasks) H(names ...string) *PendingTasks { @@ -299,11 +295,12 @@ func (r *PendingTasks) H(names ...string) *PendingTasks { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *PendingTasks) Help(help bool) *PendingTasks { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *PendingTasks) S(names ...string) *PendingTasks { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +324,28 @@ func (r *PendingTasks) MasterTimeout(duration string) *PendingTasks { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *PendingTasks) S(names ...string) *PendingTasks { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *PendingTasks) Time(time timeunit.TimeUnit) *PendingTasks { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *PendingTasks) Format(format string) *PendingTasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
+// API name: help +func (r *PendingTasks) Help(help bool) *PendingTasks { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/pendingtasks/response.go b/typedapi/cat/pendingtasks/response.go index 26e227b5d4..9615178af7 100644 --- a/typedapi/cat/pendingtasks/response.go +++ b/typedapi/cat/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 type Response []types.PendingTasksRecord diff --git a/typedapi/cat/plugins/plugins.go b/typedapi/cat/plugins/plugins.go index fe4eebb718..802a7e5212 100644 --- a/typedapi/cat/plugins/plugins.go +++ b/typedapi/cat/plugins/plugins.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns a list of plugins running on each node of a cluster. +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For // application consumption, use the nodes info API. @@ -71,12 +73,14 @@ func NewPluginsFunc(tp elastictransport.Interface) NewPlugins { } } -// Returns a list of plugins running on each node of a cluster. +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins func New(tp elastictransport.Interface) *Plugins { r := &Plugins{ transport: tp, @@ -282,15 +286,6 @@ func (r *Plugins) Header(key, value string) *Plugins { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Plugins) Format(format string) *Plugins { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Plugins) H(names ...string) *Plugins { @@ -299,11 +294,20 @@ func (r *Plugins) H(names ...string) *Plugins { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Plugins) Help(help bool) *Plugins { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. 
+// API name: s +func (r *Plugins) S(names ...string) *Plugins { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// IncludeBootstrap Include bootstrap plugins in the response +// API name: include_bootstrap +func (r *Plugins) IncludeBootstrap(includebootstrap bool) *Plugins { + r.values.Set("include_bootstrap", strconv.FormatBool(includebootstrap)) return r } @@ -327,12 +331,20 @@ func (r *Plugins) MasterTimeout(duration string) *Plugins { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Plugins) S(names ...string) *Plugins { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Plugins) Format(format string) *Plugins { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Plugins) Help(help bool) *Plugins { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/plugins/response.go b/typedapi/cat/plugins/response.go index 2e3ac87312..7423f01a97 100644 --- a/typedapi/cat/plugins/response.go +++ b/typedapi/cat/plugins/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package plugins @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package plugins // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 type Response []types.PluginsRecord diff --git a/typedapi/cat/recovery/recovery.go b/typedapi/cat/recovery/recovery.go index f620783b3d..d1fa83a2b7 100644 --- a/typedapi/cat/recovery/recovery.go +++ b/typedapi/cat/recovery/recovery.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about ongoing and completed shard recoveries. +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. 
When a shard recovery completes, the recovered shard is available for @@ -44,6 +46,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -84,7 +87,9 @@ func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { } } -// Returns information about ongoing and completed shard recoveries. +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. When a shard recovery completes, the recovered shard is available for @@ -95,7 +100,7 @@ func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { // line or Kibana console. They are not intended for use by applications. For // application consumption, use the index recovery API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery func New(tp elastictransport.Interface) *Recovery { r := &Recovery{ transport: tp, @@ -350,15 +355,6 @@ func (r *Recovery) Detailed(detailed bool) *Recovery { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Recovery) Format(format string) *Recovery { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Recovery) H(names ...string) *Recovery { @@ -367,40 +363,38 @@ func (r *Recovery) H(names ...string) *Recovery { return r } -// Help When set to `true` will output available columns. 
This option -// can't be combined with any other query string option. -// API name: help -func (r *Recovery) Help(help bool) *Recovery { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Recovery) S(names ...string) *Recovery { + r.values.Set("s", strings.Join(names, ",")) return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Recovery) Local(local bool) *Recovery { - r.values.Set("local", strconv.FormatBool(local)) +// Time Unit used to display time values. +// API name: time +func (r *Recovery) Time(time timeunit.TimeUnit) *Recovery { + r.values.Set("time", time.String()) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Recovery) MasterTimeout(duration string) *Recovery { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Recovery) Format(format string) *Recovery { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Recovery) S(names ...string) *Recovery { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
+// API name: help +func (r *Recovery) Help(help bool) *Recovery { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/recovery/response.go b/typedapi/cat/recovery/response.go index 418d67a157..490cafaeaf 100644 --- a/typedapi/cat/recovery/response.go +++ b/typedapi/cat/recovery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 type Response []types.RecoveryRecord diff --git a/typedapi/cat/repositories/repositories.go b/typedapi/cat/repositories/repositories.go index fb390ec22d..8df6350033 100644 --- a/typedapi/cat/repositories/repositories.go +++ b/typedapi/cat/repositories/repositories.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the snapshot repositories for a cluster. +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For // application consumption, use the get snapshot repository API. @@ -71,12 +73,14 @@ func NewRepositoriesFunc(tp elastictransport.Interface) NewRepositories { } } -// Returns the snapshot repositories for a cluster. +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot repository API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories func New(tp elastictransport.Interface) *Repositories { r := &Repositories{ transport: tp, @@ -282,15 +286,6 @@ func (r *Repositories) Header(key, value string) *Repositories { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Repositories) Format(format string) *Repositories { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Repositories) H(names ...string) *Repositories { @@ -299,11 +294,12 @@ func (r *Repositories) H(names ...string) *Repositories { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Repositories) Help(help bool) *Repositories { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. 
+// API name: s +func (r *Repositories) S(names ...string) *Repositories { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +323,20 @@ func (r *Repositories) MasterTimeout(duration string) *Repositories { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Repositories) S(names ...string) *Repositories { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Repositories) Format(format string) *Repositories { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Repositories) Help(help bool) *Repositories { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/repositories/response.go b/typedapi/cat/repositories/response.go index f2266e2253..15fad05492 100644 --- a/typedapi/cat/repositories/response.go +++ b/typedapi/cat/repositories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package repositories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package repositories // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 type Response []types.RepositoriesRecord diff --git a/typedapi/cat/segments/response.go b/typedapi/cat/segments/response.go index 7a949eb1d8..fa6a8e2e8a 100644 --- a/typedapi/cat/segments/response.go +++ b/typedapi/cat/segments/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 type Response []types.SegmentsRecord diff --git a/typedapi/cat/segments/segments.go b/typedapi/cat/segments/segments.go index 95db9a2241..bda2e59d9d 100644 --- a/typedapi/cat/segments/segments.go +++ b/typedapi/cat/segments/segments.go @@ -16,9 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns low-level information about the Lucene segments in index shards. +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For @@ -79,13 +81,15 @@ func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { } } -// Returns low-level information about the Lucene segments in index shards. +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the index segments API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments func New(tp elastictransport.Interface) *Segments { r := &Segments{ transport: tp, @@ -325,15 +329,6 @@ func (r *Segments) Bytes(bytes bytes.Bytes) *Segments { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Segments) Format(format string) *Segments { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. 
// API name: h func (r *Segments) H(names ...string) *Segments { @@ -342,11 +337,12 @@ func (r *Segments) H(names ...string) *Segments { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Segments) Help(help bool) *Segments { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Segments) S(names ...string) *Segments { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -370,12 +366,20 @@ func (r *Segments) MasterTimeout(duration string) *Segments { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Segments) S(names ...string) *Segments { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Segments) Format(format string) *Segments { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Segments) Help(help bool) *Segments { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/shards/response.go b/typedapi/cat/shards/response.go index 561df8631f..43bcd7607c 100644 --- a/typedapi/cat/shards/response.go +++ b/typedapi/cat/shards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package shards @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shards // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/shards/CatShardsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/shards/CatShardsResponse.ts#L22-L24 type Response []types.ShardsRecord diff --git a/typedapi/cat/shards/shards.go b/typedapi/cat/shards/shards.go index 29b5607b36..d42754e740 100644 --- a/typedapi/cat/shards/shards.go +++ b/typedapi/cat/shards/shards.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about the shards in a cluster. +// Get shard information. +// +// Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -78,12 +81,14 @@ func NewShardsFunc(tp elastictransport.Interface) NewShards { } } -// Returns information about the shards in a cluster. 
+// Get shard information. +// +// Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards func New(tp elastictransport.Interface) *Shards { r := &Shards{ transport: tp, @@ -323,15 +328,6 @@ func (r *Shards) Bytes(bytes bytes.Bytes) *Shards { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Shards) Format(format string) *Shards { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Shards) H(names ...string) *Shards { @@ -340,22 +336,12 @@ func (r *Shards) H(names ...string) *Shards { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Shards) Help(help bool) *Shards { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Shards) Local(local bool) *Shards { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. 
+// API name: s +func (r *Shards) S(names ...string) *Shards { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -368,12 +354,28 @@ func (r *Shards) MasterTimeout(duration string) *Shards { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Shards) S(names ...string) *Shards { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Shards) Time(time timeunit.TimeUnit) *Shards { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Shards) Format(format string) *Shards { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Shards) Help(help bool) *Shards { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/snapshots/response.go b/typedapi/cat/snapshots/response.go index 493ef281e7..500438612f 100644 --- a/typedapi/cat/snapshots/response.go +++ b/typedapi/cat/snapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package snapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package snapshots // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 type Response []types.SnapshotsRecord diff --git a/typedapi/cat/snapshots/snapshots.go b/typedapi/cat/snapshots/snapshots.go index 57f913e8e5..a393beb774 100644 --- a/typedapi/cat/snapshots/snapshots.go +++ b/typedapi/cat/snapshots/snapshots.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about the snapshots stored in one or more repositories. +// Get snapshot information. +// +// Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -78,13 +81,15 @@ func NewSnapshotsFunc(tp elastictransport.Interface) NewSnapshots { } } -// Returns information about the snapshots stored in one or more repositories. +// Get snapshot information. +// +// Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots func New(tp elastictransport.Interface) *Snapshots { r := &Snapshots{ transport: tp, @@ -324,15 +329,6 @@ func (r *Snapshots) IgnoreUnavailable(ignoreunavailable bool) *Snapshots { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Snapshots) Format(format string) *Snapshots { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Snapshots) H(names ...string) *Snapshots { @@ -341,22 +337,12 @@ func (r *Snapshots) H(names ...string) *Snapshots { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. 
-// API name: help -func (r *Snapshots) Help(help bool) *Snapshots { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Snapshots) Local(local bool) *Snapshots { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Snapshots) S(names ...string) *Snapshots { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -369,12 +355,28 @@ func (r *Snapshots) MasterTimeout(duration string) *Snapshots { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Snapshots) S(names ...string) *Snapshots { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Snapshots) Time(time timeunit.TimeUnit) *Snapshots { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Snapshots) Format(format string) *Snapshots { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
+// API name: help +func (r *Snapshots) Help(help bool) *Snapshots { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/tasks/response.go b/typedapi/cat/tasks/response.go index dc4e017352..7b8a8dae71 100644 --- a/typedapi/cat/tasks/response.go +++ b/typedapi/cat/tasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package tasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package tasks // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/tasks/CatTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/tasks/CatTasksResponse.ts#L22-L24 type Response []types.TasksRecord diff --git a/typedapi/cat/tasks/tasks.go b/typedapi/cat/tasks/tasks.go index 8c1d1439f3..b14f662b9c 100644 --- a/typedapi/cat/tasks/tasks.go +++ b/typedapi/cat/tasks/tasks.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about tasks currently executing in the cluster. +// Get task information. +// +// Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. 
@@ -37,6 +39,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -71,12 +74,14 @@ func NewTasksFunc(tp elastictransport.Interface) NewTasks { } } -// Returns information about tasks currently executing in the cluster. +// Get task information. +// +// Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks func New(tp elastictransport.Interface) *Tasks { r := &Tasks{ transport: tp, @@ -302,14 +307,14 @@ func (r *Tasks) Detailed(detailed bool) *Tasks { return r } -// NodeId Unique node identifiers, which are used to limit the response. -// API name: node_id -func (r *Tasks) NodeId(nodeids ...string) *Tasks { +// Nodes Unique node identifiers, which are used to limit the response. +// API name: nodes +func (r *Tasks) Nodes(nodes ...string) *Tasks { tmp := []string{} - for _, item := range nodeids { + for _, item := range nodes { tmp = append(tmp, fmt.Sprintf("%v", item)) } - r.values.Set("node_id", strings.Join(tmp, ",")) + r.values.Set("nodes", strings.Join(tmp, ",")) return r } @@ -322,15 +327,6 @@ func (r *Tasks) ParentTaskId(parenttaskid string) *Tasks { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. 
-// API name: format -func (r *Tasks) Format(format string) *Tasks { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Tasks) H(names ...string) *Tasks { @@ -339,40 +335,56 @@ func (r *Tasks) H(names ...string) *Tasks { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Tasks) Help(help bool) *Tasks { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Tasks) S(names ...string) *Tasks { + r.values.Set("s", strings.Join(names, ",")) return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Tasks) Local(local bool) *Tasks { - r.values.Set("local", strconv.FormatBool(local)) +// Time Unit used to display time values. +// API name: time +func (r *Tasks) Time(time timeunit.TimeUnit) *Tasks { + r.values.Set("time", time.String()) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Tasks) MasterTimeout(duration string) *Tasks { - r.values.Set("master_timeout", duration) +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Tasks) Timeout(duration string) *Tasks { + r.values.Set("timeout", duration) return r } -// S List of columns that determine how the table should be sorted. 
-// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Tasks) S(names ...string) *Tasks { - r.values.Set("s", strings.Join(names, ",")) +// WaitForCompletion If `true`, the request blocks until the task has completed. +// API name: wait_for_completion +func (r *Tasks) WaitForCompletion(waitforcompletion bool) *Tasks { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Tasks) Format(format string) *Tasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Tasks) Help(help bool) *Tasks { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/templates/response.go b/typedapi/cat/templates/response.go index 7ded386768..39623d402d 100644 --- a/typedapi/cat/templates/response.go +++ b/typedapi/cat/templates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package templates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package templates // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 type Response []types.TemplatesRecord diff --git a/typedapi/cat/templates/templates.go b/typedapi/cat/templates/templates.go index 38a16a4a53..72554d8ece 100644 --- a/typedapi/cat/templates/templates.go +++ b/typedapi/cat/templates/templates.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about index templates in a cluster. +// Get index template information. +// +// Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -79,14 +81,16 @@ func NewTemplatesFunc(tp elastictransport.Interface) NewTemplates { } } -// Returns information about index templates in a cluster. +// Get index template information. +// +// Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. 
// IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get index template API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates func New(tp elastictransport.Interface) *Templates { r := &Templates{ transport: tp, @@ -315,15 +319,6 @@ func (r *Templates) Name(name string) *Templates { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Templates) Format(format string) *Templates { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Templates) H(names ...string) *Templates { @@ -332,11 +327,12 @@ func (r *Templates) H(names ...string) *Templates { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Templates) Help(help bool) *Templates { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Templates) S(names ...string) *Templates { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -360,12 +356,20 @@ func (r *Templates) MasterTimeout(duration string) *Templates { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *Templates) S(names ...string) *Templates { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Templates) Format(format string) *Templates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Templates) Help(help bool) *Templates { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/threadpool/response.go b/typedapi/cat/threadpool/response.go index cdad54c1bf..6470ce059f 100644 --- a/typedapi/cat/threadpool/response.go +++ b/typedapi/cat/threadpool/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package threadpool @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package threadpool // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 type Response []types.ThreadPoolRecord diff --git a/typedapi/cat/threadpool/thread_pool.go b/typedapi/cat/threadpool/thread_pool.go index c3a23e8775..4d158eac86 100644 --- a/typedapi/cat/threadpool/thread_pool.go +++ b/typedapi/cat/threadpool/thread_pool.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns thread pool statistics for each node in a cluster. +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -80,14 +82,16 @@ func NewThreadPoolFunc(tp elastictransport.Interface) NewThreadPool { } } -// Returns thread pool statistics for each node in a cluster. +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool func New(tp elastictransport.Interface) *ThreadPool { r := &ThreadPool{ transport: tp, @@ -316,23 +320,6 @@ func (r *ThreadPool) ThreadPoolPatterns(threadpoolpatterns string) *ThreadPool { return r } -// Time The unit used to display time values. -// API name: time -func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool { - r.values.Set("time", time.String()) - - return r -} - -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *ThreadPool) Format(format string) *ThreadPool { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. 
// API name: h func (r *ThreadPool) H(names ...string) *ThreadPool { @@ -341,11 +328,20 @@ func (r *ThreadPool) H(names ...string) *ThreadPool { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *ThreadPool) Help(help bool) *ThreadPool { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *ThreadPool) S(names ...string) *ThreadPool { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Time The unit used to display time values. +// API name: time +func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool { + r.values.Set("time", time.String()) return r } @@ -369,12 +365,20 @@ func (r *ThreadPool) MasterTimeout(duration string) *ThreadPool { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *ThreadPool) S(names ...string) *ThreadPool { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *ThreadPool) Format(format string) *ThreadPool { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
+// API name: help +func (r *ThreadPool) Help(help bool) *ThreadPool { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/transforms/response.go b/typedapi/cat/transforms/response.go index 2b2dfb665a..4d3e954a47 100644 --- a/typedapi/cat/transforms/response.go +++ b/typedapi/cat/transforms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package transforms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package transforms // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 type Response []types.TransformsRecord diff --git a/typedapi/cat/transforms/transforms.go b/typedapi/cat/transforms/transforms.go index 4bd65e8a7b..93d0d2f0a9 100644 --- a/typedapi/cat/transforms/transforms.go +++ b/typedapi/cat/transforms/transforms.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Get transforms. -// Returns configuration and usage information about transforms. +// Get transform information. +// +// Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. 
They are not intended for use by applications. For @@ -81,14 +82,15 @@ func NewTransformsFunc(tp elastictransport.Interface) NewTransforms { } } -// Get transforms. -// Returns configuration and usage information about transforms. +// Get transform information. +// +// Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get transform statistics API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms func New(tp elastictransport.Interface) *Transforms { r := &Transforms{ transport: tp, @@ -400,25 +402,6 @@ func (r *Transforms) Help(help bool) *Transforms { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Transforms) Local(local bool) *Transforms { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Transforms) MasterTimeout(duration string) *Transforms { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. 
// API name: v func (r *Transforms) V(v bool) *Transforms { diff --git a/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go b/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go index f4f6fae90a..3434a659f8 100644 --- a/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go +++ b/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes auto-follow patterns. +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. package deleteautofollowpattern import ( @@ -76,9 +78,11 @@ func NewDeleteAutoFollowPatternFunc(tp elastictransport.Interface) NewDeleteAuto } } -// Deletes auto-follow patterns. +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern func New(tp elastictransport.Interface) *DeleteAutoFollowPattern { r := &DeleteAutoFollowPattern{ transport: tp, @@ -290,7 +294,7 @@ func (r *DeleteAutoFollowPattern) Header(key, value string) *DeleteAutoFollowPat return r } -// Name The name of the auto follow pattern. +// Name The auto-follow pattern collection to delete. // API Name: name func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { r.paramSet |= nameMask @@ -299,6 +303,17 @@ func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *DeleteAutoFollowPattern) MasterTimeout(duration string) *DeleteAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/deleteautofollowpattern/response.go b/typedapi/ccr/deleteautofollowpattern/response.go index f050fca211..f6444f21cb 100644 --- a/typedapi/ccr/deleteautofollowpattern/response.go +++ b/typedapi/ccr/deleteautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteautofollowpattern // Response holds the response body struct for the package deleteautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/follow/follow.go b/typedapi/ccr/follow/follow.go index 81a4e4d34e..785ef463ce 100644 --- a/typedapi/ccr/follow/follow.go +++ b/typedapi/ccr/follow/follow.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates a new follower index configured to follow the referenced leader -// index. +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. package follow import ( @@ -82,10 +85,13 @@ func NewFollowFunc(tp elastictransport.Interface) NewFollow { } } -// Creates a new follower index configured to follow the referenced leader -// index. +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow func New(tp elastictransport.Interface) *Follow { r := &Follow{ transport: tp, @@ -93,8 +99,6 @@ func New(tp elastictransport.Interface) *Follow { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,6 +321,14 @@ func (r *Follow) _index(index string) *Follow { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Follow) MasterTimeout(duration string) *Follow { + r.values.Set("master_timeout", duration) + + return r +} + // WaitForActiveShards Specifies the number of shards to wait on being active before responding. // This defaults to waiting on none of the shards to be // active. 
@@ -374,132 +386,198 @@ func (r *Follow) Pretty(pretty bool) *Follow { return r } -// DataStreamName If the leader index is part of a data stream, the name to which the local +// If the leader index is part of a data stream, the name to which the local // data stream for the followed index should be renamed. // API name: data_stream_name func (r *Follow) DataStreamName(datastreamname string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DataStreamName = &datastreamname return r } -// LeaderIndex The name of the index in the leader cluster to follow. +// The name of the index in the leader cluster to follow. // API name: leader_index func (r *Follow) LeaderIndex(indexname string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LeaderIndex = indexname return r } -// MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. +// The maximum number of outstanding reads requests from the remote cluster. // API name: max_outstanding_read_requests func (r *Follow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests return r } -// MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower. +// The maximum number of outstanding write requests on the follower. 
// API name: max_outstanding_write_requests func (r *Follow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests return r } -// MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. +// The maximum number of operations to pull per read from the remote cluster. // API name: max_read_request_operation_count func (r *Follow) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount return r } -// MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from +// The maximum size in bytes of per read of a batch of operations pulled from // the remote cluster. // API name: max_read_request_size -func (r *Follow) MaxReadRequestSize(bytesize types.ByteSize) *Follow { - r.req.MaxReadRequestSize = bytesize +func (r *Follow) MaxReadRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster() return r } -// MaxRetryDelay The maximum time to wait before retrying an operation that failed +// The maximum time to wait before retrying an operation that failed // exceptionally. An exponential backoff strategy is employed when // retrying. 
// API name: max_retry_delay -func (r *Follow) MaxRetryDelay(duration types.Duration) *Follow { - r.req.MaxRetryDelay = duration +func (r *Follow) MaxRetryDelay(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } -// MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this +// The maximum number of operations that can be queued for writing. When this // limit is reached, reads from the remote cluster will be // deferred until the number of queued operations goes below the limit. // API name: max_write_buffer_count func (r *Follow) MaxWriteBufferCount(maxwritebuffercount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteBufferCount = &maxwritebuffercount return r } -// MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When +// The maximum total bytes of operations that can be queued for writing. When // this limit is reached, reads from the remote cluster will // be deferred until the total bytes of queued operations goes below the limit. // API name: max_write_buffer_size -func (r *Follow) MaxWriteBufferSize(bytesize types.ByteSize) *Follow { - r.req.MaxWriteBufferSize = bytesize +func (r *Follow) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster() return r } -// MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the +// The maximum number of operations per bulk write request executed on the // follower. 
// API name: max_write_request_operation_count func (r *Follow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount return r } -// MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the +// The maximum total bytes of operations per bulk write request executed on the // follower. // API name: max_write_request_size -func (r *Follow) MaxWriteRequestSize(bytesize types.ByteSize) *Follow { - r.req.MaxWriteRequestSize = bytesize +func (r *Follow) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() return r } -// ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the +// The maximum time to wait for new operations on the remote cluster when the // follower index is synchronized with the leader index. // When the timeout has elapsed, the poll for operations will return to the // follower so that it can update some statistics. // Then the follower will immediately attempt to read from the leader again. // API name: read_poll_timeout -func (r *Follow) ReadPollTimeout(duration types.Duration) *Follow { - r.req.ReadPollTimeout = duration +func (r *Follow) ReadPollTimeout(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } -// RemoteCluster The remote cluster containing the leader index. +// The remote cluster containing the leader index. 
// API name: remote_cluster func (r *Follow) RemoteCluster(remotecluster string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RemoteCluster = remotecluster return r } -// Settings Settings to override from the leader index. +// Settings to override from the leader index. // API name: settings -func (r *Follow) Settings(settings *types.IndexSettings) *Follow { +func (r *Follow) Settings(settings types.IndexSettingsVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } diff --git a/typedapi/ccr/follow/request.go b/typedapi/ccr/follow/request.go index 0f8e031db5..ecb7c9e834 100644 --- a/typedapi/ccr/follow/request.go +++ b/typedapi/ccr/follow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package follow @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow/CreateFollowIndexRequest.ts#L26-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow/CreateFollowIndexRequest.ts#L26-L124 type Request struct { // DataStreamName If the leader index is part of a data stream, the name to which the local diff --git a/typedapi/ccr/follow/response.go b/typedapi/ccr/follow/response.go index b08d21b75b..df0fd9c3ef 100644 --- a/typedapi/ccr/follow/response.go +++ b/typedapi/ccr/follow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package follow // Response holds the response body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 type Response struct { FollowIndexCreated bool `json:"follow_index_created"` FollowIndexShardsAcked bool `json:"follow_index_shards_acked"` diff --git a/typedapi/ccr/followinfo/follow_info.go b/typedapi/ccr/followinfo/follow_info.go index 8b76074f02..821ef8e7e5 100644 --- a/typedapi/ccr/followinfo/follow_info.go +++ b/typedapi/ccr/followinfo/follow_info.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves information about all follower indices, including parameters and -// status for each follower index +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. package followinfo import ( @@ -77,10 +80,13 @@ func NewFollowInfoFunc(tp elastictransport.Interface) NewFollowInfo { } } -// Retrieves information about all follower indices, including parameters and -// status for each follower index +// Get follower information. 
+// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info func New(tp elastictransport.Interface) *FollowInfo { r := &FollowInfo{ transport: tp, @@ -292,8 +298,7 @@ func (r *FollowInfo) Header(key, value string) *FollowInfo { return r } -// Index A comma-separated list of index patterns; use `_all` to perform the operation -// on all indices +// Index A comma-delimited list of follower index patterns. // API Name: index func (r *FollowInfo) _index(index string) *FollowInfo { r.paramSet |= indexMask @@ -302,6 +307,17 @@ func (r *FollowInfo) _index(index string) *FollowInfo { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *FollowInfo) MasterTimeout(duration string) *FollowInfo { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/followinfo/response.go b/typedapi/ccr/followinfo/response.go index 344eb00ad5..b645172268 100644 --- a/typedapi/ccr/followinfo/response.go +++ b/typedapi/ccr/followinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package followinfo @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package followinfo // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 type Response struct { FollowerIndices []types.FollowerIndex `json:"follower_indices"` } diff --git a/typedapi/ccr/followstats/follow_stats.go b/typedapi/ccr/followstats/follow_stats.go index ae319adfc1..ce49b7e6dc 100644 --- a/typedapi/ccr/followstats/follow_stats.go +++ b/typedapi/ccr/followstats/follow_stats.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves follower stats. return shard-level stats about the following tasks -// associated with each shard for the specified indices. +// Get follower stats. +// +// Get cross-cluster replication follower stats. +// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. package followstats import ( @@ -77,10 +80,13 @@ func NewFollowStatsFunc(tp elastictransport.Interface) NewFollowStats { } } -// Retrieves follower stats. return shard-level stats about the following tasks -// associated with each shard for the specified indices. +// Get follower stats. +// +// Get cross-cluster replication follower stats. 
+// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats func New(tp elastictransport.Interface) *FollowStats { r := &FollowStats{ transport: tp, @@ -292,8 +298,7 @@ func (r *FollowStats) Header(key, value string) *FollowStats { return r } -// Index A comma-separated list of index patterns; use `_all` to perform the operation -// on all indices +// Index A comma-delimited list of index patterns. // API Name: index func (r *FollowStats) _index(index string) *FollowStats { r.paramSet |= indexMask @@ -302,6 +307,16 @@ func (r *FollowStats) _index(index string) *FollowStats { return r } +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *FollowStats) Timeout(duration string) *FollowStats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/followstats/response.go b/typedapi/ccr/followstats/response.go index 400a7f7698..e5c9b222c1 100644 --- a/typedapi/ccr/followstats/response.go +++ b/typedapi/ccr/followstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package followstats @@ -26,8 +26,10 @@ import ( // Response holds the response body struct for the package followstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L27 type Response struct { + + // Indices An array of follower index statistics. Indices []types.FollowIndexStats `json:"indices"` } diff --git a/typedapi/ccr/forgetfollower/forget_follower.go b/typedapi/ccr/forgetfollower/forget_follower.go index 86554ff844..2212aa9624 100644 --- a/typedapi/ccr/forgetfollower/forget_follower.go +++ b/typedapi/ccr/forgetfollower/forget_follower.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Removes the follower retention leases from the leader. +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. 
+// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. +// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. package forgetfollower import ( @@ -81,9 +105,33 @@ func NewForgetFollowerFunc(tp elastictransport.Interface) NewForgetFollower { } } -// Removes the follower retention leases from the leader. +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. +// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. 
+// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower func New(tp elastictransport.Interface) *ForgetFollower { r := &ForgetFollower{ transport: tp, @@ -91,8 +139,6 @@ func New(tp elastictransport.Interface) *ForgetFollower { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -316,6 +362,15 @@ func (r *ForgetFollower) _index(index string) *ForgetFollower { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *ForgetFollower) Timeout(duration string) *ForgetFollower { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace @@ -362,6 +417,10 @@ func (r *ForgetFollower) Pretty(pretty bool) *ForgetFollower { // API name: follower_cluster func (r *ForgetFollower) FollowerCluster(followercluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.FollowerCluster = &followercluster @@ -370,6 +429,11 @@ func (r *ForgetFollower) FollowerCluster(followercluster string) *ForgetFollower // API name: follower_index func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowerIndex = &indexname return r @@ -377,6 +441,11 @@ func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { // API name: follower_index_uuid func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowerIndexUuid = &uuid return r @@ -384,6 +453,10 @@ func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { // API name: leader_remote_cluster func (r *ForgetFollower) LeaderRemoteCluster(leaderremotecluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LeaderRemoteCluster = &leaderremotecluster diff --git a/typedapi/ccr/forgetfollower/request.go b/typedapi/ccr/forgetfollower/request.go index 1d01e41605..6220071913 100644 --- a/typedapi/ccr/forgetfollower/request.go +++ b/typedapi/ccr/forgetfollower/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package forgetfollower @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L24-L65 type Request struct { FollowerCluster *string `json:"follower_cluster,omitempty"` FollowerIndex *string `json:"follower_index,omitempty"` diff --git a/typedapi/ccr/forgetfollower/response.go b/typedapi/ccr/forgetfollower/response.go index 5887915fcf..fbb302d54e 100644 --- a/typedapi/ccr/forgetfollower/response.go +++ b/typedapi/ccr/forgetfollower/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package forgetfollower @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` } diff --git a/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go b/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go index d14bd81995..8b2a44a5b1 100644 --- a/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go +++ b/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Gets configured auto-follow patterns. Returns the specified auto-follow -// pattern collection. +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. package getautofollowpattern import ( @@ -75,10 +76,11 @@ func NewGetAutoFollowPatternFunc(tp elastictransport.Interface) NewGetAutoFollow } } -// Gets configured auto-follow patterns. Returns the specified auto-follow -// pattern collection. +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 func New(tp elastictransport.Interface) *GetAutoFollowPattern { r := &GetAutoFollowPattern{ transport: tp, @@ -297,8 +299,9 @@ func (r *GetAutoFollowPattern) Header(key, value string) *GetAutoFollowPattern { return r } -// Name Specifies the auto-follow pattern collection that you want to retrieve. If -// you do not specify a name, the API returns information for all collections. +// Name The auto-follow pattern collection that you want to retrieve. +// If you do not specify a name, the API returns information for all +// collections. // API Name: name func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { r.paramSet |= nameMask @@ -307,6 +310,17 @@ func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *GetAutoFollowPattern) MasterTimeout(duration string) *GetAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/getautofollowpattern/response.go b/typedapi/ccr/getautofollowpattern/response.go index e8461f93e3..d73f5e3cba 100644 --- a/typedapi/ccr/getautofollowpattern/response.go +++ b/typedapi/ccr/getautofollowpattern/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getautofollowpattern @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 type Response struct { Patterns []types.AutoFollowPattern `json:"patterns"` } diff --git a/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go b/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go index 59756b70a5..64f0fd88bf 100644 --- a/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go +++ b/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go @@ -16,9 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Pauses an auto-follow pattern +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. 
+// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. package pauseautofollowpattern import ( @@ -76,9 +88,21 @@ func NewPauseAutoFollowPatternFunc(tp elastictransport.Interface) NewPauseAutoFo } } -// Pauses an auto-follow pattern +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. +// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern func New(tp elastictransport.Interface) *PauseAutoFollowPattern { r := &PauseAutoFollowPattern{ transport: tp, @@ -292,8 +316,7 @@ func (r *PauseAutoFollowPattern) Header(key, value string) *PauseAutoFollowPatte return r } -// Name The name of the auto follow pattern that should pause discovering new indices -// to follow. +// Name The name of the auto-follow pattern to pause. 
// API Name: name func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { r.paramSet |= nameMask @@ -302,6 +325,17 @@ func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseAutoFollowPattern) MasterTimeout(duration string) *PauseAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/pauseautofollowpattern/response.go b/typedapi/ccr/pauseautofollowpattern/response.go index c8b83c6a11..fb9f138f77 100644 --- a/typedapi/ccr/pauseautofollowpattern/response.go +++ b/typedapi/ccr/pauseautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package pauseautofollowpattern // Response holds the response body struct for the package pauseautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ccr/pausefollow/pause_follow.go b/typedapi/ccr/pausefollow/pause_follow.go index ef0e87d7e5..29f0be1961 100644 --- a/typedapi/ccr/pausefollow/pause_follow.go +++ b/typedapi/ccr/pausefollow/pause_follow.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Pauses a follower index. The follower index will not fetch any additional -// operations from the leader index. +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. package pausefollow import ( @@ -77,10 +83,16 @@ func NewPauseFollowFunc(tp elastictransport.Interface) NewPauseFollow { } } -// Pauses a follower index. The follower index will not fetch any additional -// operations from the leader index. +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow func New(tp elastictransport.Interface) *PauseFollow { r := &PauseFollow{ transport: tp, @@ -292,7 +304,7 @@ func (r *PauseFollow) Header(key, value string) *PauseFollow { return r } -// Index The name of the follower index that should pause following its leader index. +// Index The name of the follower index. // API Name: index func (r *PauseFollow) _index(index string) *PauseFollow { r.paramSet |= indexMask @@ -301,6 +313,17 @@ func (r *PauseFollow) _index(index string) *PauseFollow { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseFollow) MasterTimeout(duration string) *PauseFollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/pausefollow/response.go b/typedapi/ccr/pausefollow/response.go index a35f455e98..82acfc4d13 100644 --- a/typedapi/ccr/pausefollow/response.go +++ b/typedapi/ccr/pausefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package pausefollow // Response holds the response body struct for the package pausefollow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go b/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go index 550df4275c..5ac41b5aa8 100644 --- a/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go +++ b/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Creates a new named collection of auto-follow patterns against a specified -// remote cluster. Newly created indices on the remote cluster matching any of -// the specified patterns will be automatically configured as follower indices. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. 
+// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. package putautofollowpattern import ( @@ -83,11 +92,20 @@ func NewPutAutoFollowPatternFunc(tp elastictransport.Interface) NewPutAutoFollow } } -// Creates a new named collection of auto-follow patterns against a specified -// remote cluster. Newly created indices on the remote cluster matching any of -// the specified patterns will be automatically configured as follower indices. +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern func New(tp elastictransport.Interface) *PutAutoFollowPattern { r := &PutAutoFollowPattern{ transport: tp, @@ -95,8 +113,6 @@ func New(tp elastictransport.Interface) *PutAutoFollowPattern { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -319,6 +335,14 @@ func (r *PutAutoFollowPattern) _name(name string) *PutAutoFollowPattern { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PutAutoFollowPattern) MasterTimeout(duration string) *PutAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -363,144 +387,234 @@ func (r *PutAutoFollowPattern) Pretty(pretty bool) *PutAutoFollowPattern { return r } -// FollowIndexPattern The name of follower index. The template {{leader_index}} can be used to +// The name of follower index. The template {{leader_index}} can be used to // derive the name of the follower index from the name of the leader index. When // following a data stream, use {{leader_index}}; CCR does not support changes // to the names of a follower data stream’s backing indices. 
// API name: follow_index_pattern func (r *PutAutoFollowPattern) FollowIndexPattern(indexpattern string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowIndexPattern = &indexpattern return r } -// LeaderIndexExclusionPatterns An array of simple index patterns that can be used to exclude indices from +// An array of simple index patterns that can be used to exclude indices from // being auto-followed. Indices in the remote cluster whose names are matching // one or more leader_index_patterns and one or more // leader_index_exclusion_patterns won’t be followed. // API name: leader_index_exclusion_patterns func (r *PutAutoFollowPattern) LeaderIndexExclusionPatterns(indexpatterns ...string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LeaderIndexExclusionPatterns = indexpatterns return r } -// LeaderIndexPatterns An array of simple index patterns to match against indices in the remote +// An array of simple index patterns to match against indices in the remote // cluster specified by the remote_cluster field. // API name: leader_index_patterns func (r *PutAutoFollowPattern) LeaderIndexPatterns(indexpatterns ...string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LeaderIndexPatterns = indexpatterns return r } -// MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. +// The maximum number of outstanding reads requests from the remote cluster. 
// API name: max_outstanding_read_requests func (r *PutAutoFollowPattern) MaxOutstandingReadRequests(maxoutstandingreadrequests int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests return r } -// MaxOutstandingWriteRequests The maximum number of outstanding reads requests from the remote cluster. +// The maximum number of outstanding reads requests from the remote cluster. // API name: max_outstanding_write_requests func (r *PutAutoFollowPattern) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests return r } -// MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. +// The maximum number of operations to pull per read from the remote cluster. // API name: max_read_request_operation_count func (r *PutAutoFollowPattern) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount return r } -// MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from +// The maximum size in bytes of per read of a batch of operations pulled from // the remote cluster. 
// API name: max_read_request_size -func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxReadRequestSize = bytesize +func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster() return r } -// MaxRetryDelay The maximum time to wait before retrying an operation that failed +// The maximum time to wait before retrying an operation that failed // exceptionally. An exponential backoff strategy is employed when retrying. // API name: max_retry_delay -func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.Duration) *PutAutoFollowPattern { - r.req.MaxRetryDelay = duration +func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.DurationVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } -// MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this +// The maximum number of operations that can be queued for writing. When this // limit is reached, reads from the remote cluster will be deferred until the // number of queued operations goes below the limit. // API name: max_write_buffer_count func (r *PutAutoFollowPattern) MaxWriteBufferCount(maxwritebuffercount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteBufferCount = &maxwritebuffercount return r } -// MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When +// The maximum total bytes of operations that can be queued for writing. 
When // this limit is reached, reads from the remote cluster will be deferred until // the total bytes of queued operations goes below the limit. // API name: max_write_buffer_size -func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxWriteBufferSize = bytesize +func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster() return r } -// MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the +// The maximum number of operations per bulk write request executed on the // follower. // API name: max_write_request_operation_count func (r *PutAutoFollowPattern) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount return r } -// MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the +// The maximum total bytes of operations per bulk write request executed on the // follower. 
// API name: max_write_request_size -func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxWriteRequestSize = bytesize +func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() return r } -// ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the +// The maximum time to wait for new operations on the remote cluster when the // follower index is synchronized with the leader index. When the timeout has // elapsed, the poll for operations will return to the follower so that it can // update some statistics. Then the follower will immediately attempt to read // from the leader again. // API name: read_poll_timeout -func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.Duration) *PutAutoFollowPattern { - r.req.ReadPollTimeout = duration +func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.DurationVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } -// RemoteCluster The remote cluster containing the leader indices to match against. +// The remote cluster containing the leader indices to match against. // API name: remote_cluster func (r *PutAutoFollowPattern) RemoteCluster(remotecluster string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RemoteCluster = remotecluster return r } -// Settings Settings to override from the leader index. Note that certain settings can +// Settings to override from the leader index. Note that certain settings can // not be overrode (e.g., index.number_of_shards). 
// API name: settings func (r *PutAutoFollowPattern) Settings(settings map[string]json.RawMessage) *PutAutoFollowPattern { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *PutAutoFollowPattern) AddSetting(key string, value json.RawMessage) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/ccr/putautofollowpattern/request.go b/typedapi/ccr/putautofollowpattern/request.go index 0e1a561c0f..0b75220e98 100644 --- a/typedapi/ccr/putautofollowpattern/request.go +++ b/typedapi/ccr/putautofollowpattern/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putautofollowpattern @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L112 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L133 type Request struct { // FollowIndexPattern The name of follower index. 
The template {{leader_index}} can be used to diff --git a/typedapi/ccr/putautofollowpattern/response.go b/typedapi/ccr/putautofollowpattern/response.go index 08853fe250..6f91e3c966 100644 --- a/typedapi/ccr/putautofollowpattern/response.go +++ b/typedapi/ccr/putautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putautofollowpattern // Response holds the response body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumeautofollowpattern/response.go b/typedapi/ccr/resumeautofollowpattern/response.go index ccd0d41bd0..6eb93036be 100644 --- a/typedapi/ccr/resumeautofollowpattern/response.go +++ b/typedapi/ccr/resumeautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resumeautofollowpattern // Response holds the response body struct for the package resumeautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go b/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go index 323c1896f5..909ce09bfd 100644 --- a/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go +++ b/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Resumes an auto-follow pattern that has been paused +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. 
package resumeautofollowpattern import ( @@ -76,9 +82,15 @@ func NewResumeAutoFollowPatternFunc(tp elastictransport.Interface) NewResumeAuto } } -// Resumes an auto-follow pattern that has been paused +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern func New(tp elastictransport.Interface) *ResumeAutoFollowPattern { r := &ResumeAutoFollowPattern{ transport: tp, @@ -292,8 +304,7 @@ func (r *ResumeAutoFollowPattern) Header(key, value string) *ResumeAutoFollowPat return r } -// Name The name of the auto follow pattern to resume discovering new indices to -// follow. +// Name The name of the auto-follow pattern to resume. // API Name: name func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { r.paramSet |= nameMask @@ -302,6 +313,17 @@ func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *ResumeAutoFollowPattern) MasterTimeout(duration string) *ResumeAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/ccr/resumefollow/request.go b/typedapi/ccr/resumefollow/request.go index fff476bbea..0c43aa0946 100644 --- a/typedapi/ccr/resumefollow/request.go +++ b/typedapi/ccr/resumefollow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resumefollow @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L65 type Request struct { MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` MaxOutstandingWriteRequests *int64 `json:"max_outstanding_write_requests,omitempty"` diff --git a/typedapi/ccr/resumefollow/response.go b/typedapi/ccr/resumefollow/response.go index ac48b645c2..e57c8eb93a 100644 --- a/typedapi/ccr/resumefollow/response.go +++ b/typedapi/ccr/resumefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resumefollow // Response holds the response body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumefollow/resume_follow.go b/typedapi/ccr/resumefollow/resume_follow.go index fe6dbbe66c..286e448dc1 100644 --- a/typedapi/ccr/resumefollow/resume_follow.go +++ b/typedapi/ccr/resumefollow/resume_follow.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Resumes a follower index that has been paused +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Resume a follower. +// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. package resumefollow import ( @@ -81,9 +87,15 @@ func NewResumeFollowFunc(tp elastictransport.Interface) NewResumeFollow { } } -// Resumes a follower index that has been paused +// Resume a follower. 
+// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow func New(tp elastictransport.Interface) *ResumeFollow { r := &ResumeFollow{ transport: tp, @@ -91,8 +103,6 @@ func New(tp elastictransport.Interface) *ResumeFollow { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,6 +325,14 @@ func (r *ResumeFollow) _index(index string) *ResumeFollow { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResumeFollow) MasterTimeout(duration string) *ResumeFollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace @@ -361,6 +379,10 @@ func (r *ResumeFollow) Pretty(pretty bool) *ResumeFollow { // API name: max_outstanding_read_requests func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests @@ -369,6 +391,10 @@ func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int // API name: max_outstanding_write_requests func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests @@ -377,6 +403,10 @@ func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests i // API name: max_read_request_operation_count func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount @@ -385,6 +415,10 @@ func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount // API name: max_read_request_size func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxReadRequestSize = &maxreadrequestsize @@ -392,14 +426,23 @@ func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFoll } // API name: max_retry_delay -func (r *ResumeFollow) MaxRetryDelay(duration types.Duration) *ResumeFollow { - r.req.MaxRetryDelay = duration +func (r *ResumeFollow) MaxRetryDelay(duration types.DurationVariant) *ResumeFollow { + // Initialize the 
request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } // API name: max_write_buffer_count func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteBufferCount = &maxwritebuffercount @@ -408,6 +451,10 @@ func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFol // API name: max_write_buffer_size func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteBufferSize = &maxwritebuffersize @@ -416,6 +463,10 @@ func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFoll // API name: max_write_request_operation_count func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount @@ -424,6 +475,10 @@ func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcou // API name: max_write_request_size func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteRequestSize = &maxwriterequestsize @@ -431,8 +486,13 @@ func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize string) *ResumeFo } // API name: read_poll_timeout -func (r *ResumeFollow) ReadPollTimeout(duration types.Duration) *ResumeFollow { - r.req.ReadPollTimeout = duration +func (r *ResumeFollow) ReadPollTimeout(duration types.DurationVariant) *ResumeFollow { + // Initialize the request if it is 
not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/ccr/stats/response.go b/typedapi/ccr/stats/response.go index ebf5f94b8e..5165fbdf49 100644 --- a/typedapi/ccr/stats/response.go +++ b/typedapi/ccr/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,10 +26,13 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/stats/CcrStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/stats/CcrStatsResponse.ts#L22-L29 type Response struct { + + // AutoFollowStats Statistics for the auto-follow coordinator. AutoFollowStats types.AutoFollowStats `json:"auto_follow_stats"` - FollowStats types.FollowStats `json:"follow_stats"` + // FollowStats Shard-level statistics for follower indices. + FollowStats types.FollowStats `json:"follow_stats"` } // NewResponse returns a Response diff --git a/typedapi/ccr/stats/stats.go b/typedapi/ccr/stats/stats.go index 3eadc0a772..dccf139a4d 100644 --- a/typedapi/ccr/stats/stats.go +++ b/typedapi/ccr/stats/stats.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Gets all stats related to cross-cluster replication. 
+// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. package stats import ( @@ -68,9 +71,12 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Gets all stats related to cross-cluster replication. +// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, @@ -276,6 +282,26 @@ func (r *Stats) Header(key, value string) *Stats { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/unfollow/response.go b/typedapi/ccr/unfollow/response.go index 69d2012c8c..0c4cdf82ed 100644 --- a/typedapi/ccr/unfollow/response.go +++ b/typedapi/ccr/unfollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package unfollow // Response holds the response body struct for the package unfollow // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/unfollow/unfollow.go b/typedapi/ccr/unfollow/unfollow.go index e41f43f55c..ffe422afd6 100644 --- a/typedapi/ccr/unfollow/unfollow.go +++ b/typedapi/ccr/unfollow/unfollow.go @@ -16,10 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Stops the following task associated with a follower index and removes index -// metadata and settings associated with cross-cluster replication. +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. 
package unfollow import ( @@ -77,10 +87,20 @@ func NewUnfollowFunc(tp elastictransport.Interface) NewUnfollow { } } -// Stops the following task associated with a follower index and removes index -// metadata and settings associated with cross-cluster replication. +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow func New(tp elastictransport.Interface) *Unfollow { r := &Unfollow{ transport: tp, @@ -292,7 +312,7 @@ func (r *Unfollow) Header(key, value string) *Unfollow { return r } -// Index The name of the follower index that should be turned into a regular index. +// Index The name of the follower index. // API Name: index func (r *Unfollow) _index(index string) *Unfollow { r.paramSet |= indexMask @@ -301,6 +321,17 @@ func (r *Unfollow) _index(index string) *Unfollow { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. 
+// API name: master_timeout +func (r *Unfollow) MasterTimeout(duration string) *Unfollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/cluster/allocationexplain/allocation_explain.go b/typedapi/cluster/allocationexplain/allocation_explain.go index d65395db7b..81a5a36bf2 100644 --- a/typedapi/cluster/allocationexplain/allocation_explain.go +++ b/typedapi/cluster/allocationexplain/allocation_explain.go @@ -16,9 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Provides explanations for shard allocations in the cluster. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. package allocationexplain import ( @@ -73,9 +82,18 @@ func NewAllocationExplainFunc(tp elastictransport.Interface) NewAllocationExplai } } -// Provides explanations for shard allocations in the cluster. +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. 
+// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain func New(tp elastictransport.Interface) *AllocationExplain { r := &AllocationExplain{ transport: tp, @@ -83,8 +101,6 @@ func New(tp elastictransport.Interface) *AllocationExplain { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,6 +326,14 @@ func (r *AllocationExplain) IncludeYesDecisions(includeyesdecisions bool) *Alloc return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *AllocationExplain) MasterTimeout(duration string) *AllocationExplain { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -354,35 +378,54 @@ func (r *AllocationExplain) Pretty(pretty bool) *AllocationExplain { return r } -// CurrentNode Specifies the node ID or the name of the node to only explain a shard that is +// Specifies the node ID or the name of the node to only explain a shard that is // currently located on the specified node. 
// API name: current_node func (r *AllocationExplain) CurrentNode(currentnode string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CurrentNode = ¤tnode return r } -// Index Specifies the name of the index that you would like an explanation for. +// Specifies the name of the index that you would like an explanation for. // API name: index func (r *AllocationExplain) Index(indexname string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = &indexname return r } -// Primary If true, returns explanation for the primary shard for the given shard ID. +// If true, returns explanation for the primary shard for the given shard ID. // API name: primary func (r *AllocationExplain) Primary(primary bool) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Primary = &primary return r } -// Shard Specifies the ID of the shard that you would like an explanation for. +// Specifies the ID of the shard that you would like an explanation for. // API name: shard func (r *AllocationExplain) Shard(shard int) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Shard = &shard return r diff --git a/typedapi/cluster/allocationexplain/request.go b/typedapi/cluster/allocationexplain/request.go index 1617ae2cb6..5adb3767e5 100644 --- a/typedapi/cluster/allocationexplain/request.go +++ b/typedapi/cluster/allocationexplain/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package allocationexplain @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L25-L79 type Request struct { // CurrentNode Specifies the node ID or the name of the node to only explain a shard that is diff --git a/typedapi/cluster/allocationexplain/response.go b/typedapi/cluster/allocationexplain/response.go index acc2ee93c7..6cc83419d5 100644 --- a/typedapi/cluster/allocationexplain/response.go +++ b/typedapi/cluster/allocationexplain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package allocationexplain @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 type Response struct { AllocateExplanation *string `json:"allocate_explanation,omitempty"` AllocationDelay types.Duration `json:"allocation_delay,omitempty"` diff --git a/typedapi/cluster/deletecomponenttemplate/delete_component_template.go b/typedapi/cluster/deletecomponenttemplate/delete_component_template.go index 83c0bd40fd..0032326ed6 100644 --- a/typedapi/cluster/deletecomponenttemplate/delete_component_template.go +++ b/typedapi/cluster/deletecomponenttemplate/delete_component_template.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete component templates. -// Deletes component templates. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. package deletecomponenttemplate @@ -80,11 +79,10 @@ func NewDeleteComponentTemplateFunc(tp elastictransport.Interface) NewDeleteComp } // Delete component templates. -// Deletes component templates. 
// Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template func New(tp elastictransport.Interface) *DeleteComponentTemplate { r := &DeleteComponentTemplate{ transport: tp, diff --git a/typedapi/cluster/deletecomponenttemplate/response.go b/typedapi/cluster/deletecomponenttemplate/response.go index c21e145825..59f55bff14 100644 --- a/typedapi/cluster/deletecomponenttemplate/response.go +++ b/typedapi/cluster/deletecomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletecomponenttemplate // Response holds the response body struct for the package deletecomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go b/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go index 00bdf1778b..b6dfe68afb 100644 --- a/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go +++ b/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clears cluster voting config exclusions. +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. package deletevotingconfigexclusions import ( @@ -66,9 +67,10 @@ func NewDeleteVotingConfigExclusionsFunc(tp elastictransport.Interface) NewDelet } } -// Clears cluster voting config exclusions. +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions func New(tp elastictransport.Interface) *DeleteVotingConfigExclusions { r := &DeleteVotingConfigExclusions{ transport: tp, @@ -225,6 +227,14 @@ func (r *DeleteVotingConfigExclusions) Header(key, value string) *DeleteVotingCo return r } +// MasterTimeout Period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *DeleteVotingConfigExclusions) MasterTimeout(duration string) *DeleteVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + // WaitForRemoval Specifies whether to wait for all excluded nodes to be removed from the // cluster before clearing the voting configuration exclusions list. // Defaults to true, meaning that all excluded nodes must be removed from diff --git a/typedapi/cluster/existscomponenttemplate/exists_component_template.go b/typedapi/cluster/existscomponenttemplate/exists_component_template.go index cd22626c11..ae779bd577 100644 --- a/typedapi/cluster/existscomponenttemplate/exists_component_template.go +++ b/typedapi/cluster/existscomponenttemplate/exists_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check component templates. // Returns information about whether a particular component template exists. @@ -78,7 +78,7 @@ func NewExistsComponentTemplateFunc(tp elastictransport.Interface) NewExistsComp // Check component templates. // Returns information about whether a particular component template exists. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template func New(tp elastictransport.Interface) *ExistsComponentTemplate { r := &ExistsComponentTemplate{ transport: tp, diff --git a/typedapi/cluster/getcomponenttemplate/get_component_template.go b/typedapi/cluster/getcomponenttemplate/get_component_template.go index 3a43332fc6..c0a4894d1f 100644 --- a/typedapi/cluster/getcomponenttemplate/get_component_template.go +++ b/typedapi/cluster/getcomponenttemplate/get_component_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get component templates. -// Retrieves information about component templates. +// Get information about component templates. package getcomponenttemplate import ( @@ -76,9 +76,9 @@ func NewGetComponentTemplateFunc(tp elastictransport.Interface) NewGetComponentT } // Get component templates. -// Retrieves information about component templates. +// Get information about component templates. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template func New(tp elastictransport.Interface) *GetComponentTemplate { r := &GetComponentTemplate{ transport: tp, diff --git a/typedapi/cluster/getcomponenttemplate/response.go b/typedapi/cluster/getcomponenttemplate/response.go index 9b8a4898a8..cb06f9353b 100644 --- a/typedapi/cluster/getcomponenttemplate/response.go +++ b/typedapi/cluster/getcomponenttemplate/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcomponenttemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 type Response struct { ComponentTemplates []types.ClusterComponentTemplate `json:"component_templates"` } diff --git a/typedapi/cluster/getsettings/get_settings.go b/typedapi/cluster/getsettings/get_settings.go index 2cc414676a..281e7b903b 100644 --- a/typedapi/cluster/getsettings/get_settings.go +++ b/typedapi/cluster/getsettings/get_settings.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster-wide settings. +// Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. package getsettings @@ -69,10 +69,10 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Returns cluster-wide settings. +// Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings func New(tp elastictransport.Interface) *GetSettings { r := &GetSettings{ transport: tp, diff --git a/typedapi/cluster/getsettings/response.go b/typedapi/cluster/getsettings/response.go index af2c298f82..d9147e03cd 100644 --- a/typedapi/cluster/getsettings/response.go +++ b/typedapi/cluster/getsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 type Response struct { Defaults map[string]json.RawMessage `json:"defaults,omitempty"` Persistent map[string]json.RawMessage `json:"persistent"` diff --git a/typedapi/cluster/health/health.go b/typedapi/cluster/health/health.go index d14cdb2ff6..9f707965ef 100644 --- a/typedapi/cluster/health/health.go +++ b/typedapi/cluster/health/health.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// The cluster health API returns a simple status on the health of the cluster. +// Get the cluster health status. +// // You can also use the API to get the health status of only specified data -// streams and indices. For data streams, the API retrieves the health status of -// the stream’s backing indices. -// The cluster health status is: green, yellow or red. On the shard level, a red -// status indicates that the specific shard is not allocated in the cluster, -// yellow means that the primary shard is allocated but replicas are not, and -// green means that all shards are allocated. The index level status is -// controlled by the worst shard status. The cluster status is controlled by the -// worst index status. +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. package health import ( @@ -88,18 +94,24 @@ func NewHealthFunc(tp elastictransport.Interface) NewHealth { } } -// The cluster health API returns a simple status on the health of the cluster. +// Get the cluster health status. +// // You can also use the API to get the health status of only specified data -// streams and indices. 
For data streams, the API retrieves the health status of -// the stream’s backing indices. -// The cluster health status is: green, yellow or red. On the shard level, a red -// status indicates that the specific shard is not allocated in the cluster, -// yellow means that the primary shard is allocated but replicas are not, and -// green means that all shards are allocated. The index level status is -// controlled by the worst shard status. The cluster status is controlled by the -// worst index status. +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health func New(tp elastictransport.Interface) *Health { r := &Health{ transport: tp, diff --git a/typedapi/cluster/health/response.go b/typedapi/cluster/health/response.go index 5e6e3285be..14a25eb595 100644 --- a/typedapi/cluster/health/response.go +++ b/typedapi/cluster/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package health @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/health/ClusterHealthResponse.ts#L26-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/health/ClusterHealthResponse.ts#L26-L37 type Response struct { // ActivePrimaryShards The number of active primary shards. diff --git a/typedapi/cluster/info/info.go b/typedapi/cluster/info/info.go index 86a4f6744b..2e16a3ef52 100644 --- a/typedapi/cluster/info/info.go +++ b/typedapi/cluster/info/info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get cluster info. // Returns basic information about the cluster. @@ -80,7 +80,7 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { // Get cluster info. // Returns basic information about the cluster. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-info.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, diff --git a/typedapi/cluster/info/response.go b/typedapi/cluster/info/response.go index 7dbf44a3e3..43e642cf7b 100644 --- a/typedapi/cluster/info/response.go +++ b/typedapi/cluster/info/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 type Response struct { ClusterName string `json:"cluster_name"` Http *types.Http `json:"http,omitempty"` diff --git a/typedapi/cluster/pendingtasks/pending_tasks.go b/typedapi/cluster/pendingtasks/pending_tasks.go index 4f6355f693..6733b3012f 100644 --- a/typedapi/cluster/pendingtasks/pending_tasks.go +++ b/typedapi/cluster/pendingtasks/pending_tasks.go @@ -16,12 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster-level changes (such as create index, update mapping, allocate -// or fail shard) that have not yet been executed. +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// // NOTE: This API returns a list of any pending updates to the cluster state. 
-// These are distinct from the tasks reported by the Task Management API which +// These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a @@ -76,17 +78,19 @@ func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { } } -// Returns cluster-level changes (such as create index, update mapping, allocate -// or fail shard) that have not yet been executed. +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// // NOTE: This API returns a list of any pending updates to the cluster state. -// These are distinct from the tasks reported by the Task Management API which +// These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a // cluster state update, the activity of this task might be reported by both // task api and pending cluster tasks API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks func New(tp elastictransport.Interface) *PendingTasks { r := &PendingTasks{ transport: tp, diff --git a/typedapi/cluster/pendingtasks/response.go b/typedapi/cluster/pendingtasks/response.go index bd12adb0cd..6c6e5a1362 100644 --- a/typedapi/cluster/pendingtasks/response.go +++ b/typedapi/cluster/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 type Response struct { Tasks []types.PendingTask `json:"tasks"` } diff --git a/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go b/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go index ba8aca7a34..baefab5f72 100644 --- a/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go +++ b/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go @@ -16,9 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Updates the cluster voting config exclusions by node ids or node names. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Update voting configuration exclusions. +// Update the cluster voting config exclusions by node IDs or node names. +// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. 
+// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. +// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. +// +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. 
package postvotingconfigexclusions import ( @@ -66,9 +104,47 @@ func NewPostVotingConfigExclusionsFunc(tp elastictransport.Interface) NewPostVot } } -// Updates the cluster voting config exclusions by node ids or node names. +// Update voting configuration exclusions. +// Update the cluster voting config exclusions by node IDs or node names. +// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. +// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. 
+// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions func New(tp elastictransport.Interface) *PostVotingConfigExclusions { r := &PostVotingConfigExclusions{ transport: tp, @@ -244,6 +320,14 @@ func (r *PostVotingConfigExclusions) NodeIds(ids ...string) *PostVotingConfigExc return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostVotingConfigExclusions) MasterTimeout(duration string) *PostVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + // Timeout When adding a voting configuration exclusion, the API waits for the // specified nodes to be excluded from the voting configuration before // returning. If the timeout expires before the appropriate condition diff --git a/typedapi/cluster/putcomponenttemplate/put_component_template.go b/typedapi/cluster/putcomponenttemplate/put_component_template.go index 6031329c0f..45561c1385 100644 --- a/typedapi/cluster/putcomponenttemplate/put_component_template.go +++ b/typedapi/cluster/putcomponenttemplate/put_component_template.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update a component template. -// Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -42,6 +41,12 @@ // You can use C-style `/* *\/` block comments in component templates. // You can include comments anywhere in the request body except before the // opening curly bracket. +// +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. package putcomponenttemplate import ( @@ -105,7 +110,6 @@ func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentT } // Create or update a component template. -// Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -129,7 +133,13 @@ func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentT // You can include comments anywhere in the request body except before the // opening curly bracket. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template func New(tp elastictransport.Interface) *PutComponentTemplate { r := &PutComponentTemplate{ transport: tp, @@ -137,8 +147,6 @@ func New(tp elastictransport.Interface) *PutComponentTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -432,45 +440,64 @@ func (r *PutComponentTemplate) Pretty(pretty bool) *PutComponentTemplate { return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. // API name: deprecated func (r *PutComponentTemplate) Deprecated(deprecated bool) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// Meta_ Optional user metadata about the component template. -// May have any contents. This map is not automatically generated by +// Optional user metadata about the component template. +// It may have any contents. This map is not automatically generated by // Elasticsearch. // This information is stored in the cluster state, so keeping it short is // preferable. // To unset `_meta`, replace the template without specifying this information. 
// API name: _meta -func (r *PutComponentTemplate) Meta_(metadata types.Metadata) *PutComponentTemplate { - r.req.Meta_ = metadata +func (r *PutComponentTemplate) Meta_(metadata types.MetadataVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Template The template to be applied which includes mappings, settings, or aliases +// The template to be applied which includes mappings, settings, or aliases // configuration. // API name: template -func (r *PutComponentTemplate) Template(template *types.IndexState) *PutComponentTemplate { +func (r *PutComponentTemplate) Template(template types.IndexStateVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = *template + r.req.Template = *template.IndexStateCaster() return r } -// Version Version number used to manage component templates externally. +// Version number used to manage component templates externally. // This number isn't automatically generated or incremented by Elasticsearch. // To unset a version, replace the template without specifying a version. // API name: version func (r *PutComponentTemplate) Version(versionnumber int64) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/cluster/putcomponenttemplate/request.go b/typedapi/cluster/putcomponenttemplate/request.go index 0eb6fee6cd..e25383a6f9 100644 --- a/typedapi/cluster/putcomponenttemplate/request.go +++ b/typedapi/cluster/putcomponenttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putcomponenttemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L105 type Request struct { // Deprecated Marks this index template as deprecated. When creating or updating a @@ -42,7 +42,7 @@ type Request struct { // warning. Deprecated *bool `json:"deprecated,omitempty"` // Meta_ Optional user metadata about the component template. - // May have any contents. This map is not automatically generated by + // It may have any contents. This map is not automatically generated by // Elasticsearch. // This information is stored in the cluster state, so keeping it short is // preferable. diff --git a/typedapi/cluster/putcomponenttemplate/response.go b/typedapi/cluster/putcomponenttemplate/response.go index faa34ac908..aac3e8709b 100644 --- a/typedapi/cluster/putcomponenttemplate/response.go +++ b/typedapi/cluster/putcomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putcomponenttemplate // Response holds the response body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/cluster/putsettings/put_settings.go b/typedapi/cluster/putsettings/put_settings.go index 1ea97d035b..b4d8c9430a 100644 --- a/typedapi/cluster/putsettings/put_settings.go +++ b/typedapi/cluster/putsettings/put_settings.go @@ -16,9 +16,41 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Updates the cluster settings. +// Update the cluster settings. +// +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. 
+// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. +// +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. +// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. package putsettings import ( @@ -73,9 +105,41 @@ func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { } } -// Updates the cluster settings. +// Update the cluster settings. +// +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. 
+// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. +// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings func New(tp elastictransport.Interface) *PutSettings { r := &PutSettings{ transport: tp, @@ -83,8 +147,6 @@ func New(tp elastictransport.Interface) *PutSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,16 +424,58 @@ func (r *PutSettings) Pretty(pretty bool) *PutSettings { // API name: persistent func (r *PutSettings) Persistent(persistent map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Persistent = persistent + return r +} +func (r *PutSettings) AddPersistent(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Persistent == nil { + r.req.Persistent = make(map[string]json.RawMessage) + } else { + tmp = r.req.Persistent + } + + tmp[key] = value + + r.req.Persistent = tmp return r } // API name: transient func (r *PutSettings) Transient(transient map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Transient = transient + return r +} + +func (r *PutSettings) AddTransient(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Transient == nil { + r.req.Transient = make(map[string]json.RawMessage) + } else { + tmp = r.req.Transient + } + + tmp[key] = value + r.req.Transient = tmp return r } diff --git a/typedapi/cluster/putsettings/request.go b/typedapi/cluster/putsettings/request.go index 3b69dc8f3d..e7c9c21bf7 100644 --- 
a/typedapi/cluster/putsettings/request.go +++ b/typedapi/cluster/putsettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsettings @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L68 type Request struct { Persistent map[string]json.RawMessage `json:"persistent,omitempty"` Transient map[string]json.RawMessage `json:"transient,omitempty"` diff --git a/typedapi/cluster/putsettings/response.go b/typedapi/cluster/putsettings/response.go index c0736a574d..a31c35558a 100644 --- a/typedapi/cluster/putsettings/response.go +++ b/typedapi/cluster/putsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 type Response struct { Acknowledged bool `json:"acknowledged"` Persistent map[string]json.RawMessage `json:"persistent"` diff --git a/typedapi/cluster/remoteinfo/remote_info.go b/typedapi/cluster/remoteinfo/remote_info.go index b6b2b4dde1..857f8bccfc 100644 --- a/typedapi/cluster/remoteinfo/remote_info.go +++ b/typedapi/cluster/remoteinfo/remote_info.go @@ -16,11 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// The cluster remote info API allows you to retrieve all of the configured -// remote cluster information. It returns connection and endpoint information -// keyed by the configured remote cluster alias. +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. 
+// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). package remoteinfo import ( @@ -70,11 +84,25 @@ func NewRemoteInfoFunc(tp elastictransport.Interface) NewRemoteInfo { } } -// The cluster remote info API allows you to retrieve all of the configured -// remote cluster information. It returns connection and endpoint information -// keyed by the configured remote cluster alias. +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. +// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info func New(tp elastictransport.Interface) *RemoteInfo { r := &RemoteInfo{ transport: tp, diff --git a/typedapi/cluster/remoteinfo/response.go b/typedapi/cluster/remoteinfo/response.go index 9192d14004..82458e189e 100644 --- a/typedapi/cluster/remoteinfo/response.go +++ b/typedapi/cluster/remoteinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package remoteinfo @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package remoteinfo // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L27 type Response map[string]types.ClusterRemoteInfo diff --git a/typedapi/cluster/reroute/request.go b/typedapi/cluster/reroute/request.go index 574b9d4c79..a52d7048af 100644 --- a/typedapi/cluster/reroute/request.go +++ b/typedapi/cluster/reroute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reroute @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L91 type Request struct { // Commands Defines the commands to perform. diff --git a/typedapi/cluster/reroute/reroute.go b/typedapi/cluster/reroute/reroute.go index 86e0b22d86..1d030c2131 100644 --- a/typedapi/cluster/reroute/reroute.go +++ b/typedapi/cluster/reroute/reroute.go @@ -16,9 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Allows to manually change the allocation of individual shards in the cluster. +// Reroute the cluster. +// Manually change the allocation of individual shards in the cluster. +// For example, a shard can be moved from one node to another explicitly, an +// allocation can be canceled, and an unassigned shard can be explicitly +// allocated to a specific node. +// +// It is important to note that after processing any reroute commands +// Elasticsearch will perform rebalancing as normal (respecting the values of +// settings such as `cluster.routing.rebalance.enable`) in order to remain in a +// balanced state. 
+// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. +// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. +// +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. +// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. package reroute import ( @@ -73,9 +101,37 @@ func NewRerouteFunc(tp elastictransport.Interface) NewReroute { } } -// Allows to manually change the allocation of individual shards in the cluster. +// Reroute the cluster. +// Manually change the allocation of individual shards in the cluster. +// For example, a shard can be moved from one node to another explicitly, an +// allocation can be canceled, and an unassigned shard can be explicitly +// allocated to a specific node. +// +// It is important to note that after processing any reroute commands +// Elasticsearch will perform rebalancing as normal (respecting the values of +// settings such as `cluster.routing.rebalance.enable`) in order to remain in a +// balanced state. +// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. 
+// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. +// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute func New(tp elastictransport.Interface) *Reroute { r := &Reroute{ transport: tp, @@ -83,8 +139,6 @@ func New(tp elastictransport.Interface) *Reroute { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -292,8 +346,11 @@ func (r *Reroute) Header(key, value string) *Reroute { return r } -// DryRun If true, then the request simulates the operation only and returns the -// resulting state. +// DryRun If true, then the request simulates the operation. +// It will calculate the result of applying the commands to the current cluster +// state and return the resulting cluster state after the commands (and +// rebalancing) have been applied; it will not actually perform the requested +// changes. 
// API name: dry_run func (r *Reroute) DryRun(dryrun bool) *Reroute { r.values.Set("dry_run", strconv.FormatBool(dryrun)) @@ -302,7 +359,7 @@ func (r *Reroute) DryRun(dryrun bool) *Reroute { } // Explain If true, then the response contains an explanation of why the commands can or -// cannot be executed. +// cannot run. // API name: explain func (r *Reroute) Explain(explain bool) *Reroute { r.values.Set("explain", strconv.FormatBool(explain)) @@ -389,10 +446,17 @@ func (r *Reroute) Pretty(pretty bool) *Reroute { return r } -// Commands Defines the commands to perform. +// Defines the commands to perform. // API name: commands -func (r *Reroute) Commands(commands ...types.Command) *Reroute { - r.req.Commands = commands +func (r *Reroute) Commands(commands ...types.CommandVariant) *Reroute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range commands { + + r.req.Commands = append(r.req.Commands, *v.CommandCaster()) + } return r } diff --git a/typedapi/cluster/reroute/response.go b/typedapi/cluster/reroute/response.go index 07da1dc0e6..fa6a97a9c4 100644 --- a/typedapi/cluster/reroute/response.go +++ b/typedapi/cluster/reroute/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reroute @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 type Response struct { Acknowledged bool `json:"acknowledged"` Explanations []types.RerouteExplanation `json:"explanations,omitempty"` diff --git a/typedapi/cluster/state/response.go b/typedapi/cluster/state/response.go index 764e446067..a30c49014c 100644 --- a/typedapi/cluster/state/response.go +++ b/typedapi/cluster/state/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package state @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package state // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/state/ClusterStateResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/state/ClusterStateResponse.ts#L22-L29 type Response = json.RawMessage diff --git a/typedapi/cluster/state/state.go b/typedapi/cluster/state/state.go index 0c484d2cd3..252661bc55 100644 --- a/typedapi/cluster/state/state.go +++ b/typedapi/cluster/state/state.go @@ -16,9 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns a comprehensive information about the state of the cluster. +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. 
+// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. +// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. package state import ( @@ -78,9 +109,40 @@ func NewStateFunc(tp elastictransport.Interface) NewState { } } -// Returns a comprehensive information about the state of the cluster. +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. +// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. 
+// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state func New(tp elastictransport.Interface) *State { r := &State{ transport: tp, diff --git a/typedapi/cluster/stats/response.go b/typedapi/cluster/stats/response.go index 3d7cc45307..9bdc5631ef 100644 --- a/typedapi/cluster/stats/response.go +++ b/typedapi/cluster/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/ClusterStatsResponse.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/ClusterStatsResponse.ts#L53-L55 type Response struct { // ClusterName Name of the cluster, based on the cluster name setting. diff --git a/typedapi/cluster/stats/stats.go b/typedapi/cluster/stats/stats.go index 3bdd371de0..ac60dd97c8 100644 --- a/typedapi/cluster/stats/stats.go +++ b/typedapi/cluster/stats/stats.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster statistics. -// It returns basic index metrics (shard numbers, store size, memory usage) and +// Get cluster statistics. +// Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). package stats @@ -77,12 +77,12 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns cluster statistics. -// It returns basic index metrics (shard numbers, store size, memory usage) and +// Get cluster statistics. 
+// Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, diff --git a/typedapi/connector/checkin/check_in.go b/typedapi/connector/checkin/check_in.go index 08264db944..a2c7dc4b52 100644 --- a/typedapi/connector/checkin/check_in.go +++ b/typedapi/connector/checkin/check_in.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check in a connector. // @@ -84,7 +84,7 @@ func NewCheckInFunc(tp elastictransport.Interface) NewCheckIn { // Update the `last_seen` field in the connector and set it to the current // timestamp. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in func New(tp elastictransport.Interface) *CheckIn { r := &CheckIn{ transport: tp, diff --git a/typedapi/connector/checkin/response.go b/typedapi/connector/checkin/response.go index 1d2b42cc2c..3efb9ea1f3 100644 --- a/typedapi/connector/checkin/response.go +++ b/typedapi/connector/checkin/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package checkin @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package checkin // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/check_in/ConnectorCheckInResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/check_in/ConnectorCheckInResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/delete/delete.go b/typedapi/connector/delete/delete.go index 36e87a8b76..202b78fa59 100644 --- a/typedapi/connector/delete/delete.go +++ b/typedapi/connector/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a connector. // @@ -90,7 +90,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { // indices associated with the connector. // These need to be removed manually. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -318,6 +318,14 @@ func (r *Delete) DeleteSyncJobs(deletesyncjobs bool) *Delete { return r } +// Hard A flag indicating if the connector should be hard deleted. 
+// API name: hard +func (r *Delete) Hard(hard bool) *Delete { + r.values.Set("hard", strconv.FormatBool(hard)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/connector/delete/response.go b/typedapi/connector/delete/response.go index a5f5e09725..bb7477482d 100644 --- a/typedapi/connector/delete/response.go +++ b/typedapi/connector/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/delete/ConnectorDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/delete/ConnectorDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/connector/get/get.go b/typedapi/connector/get/get.go index b236fdbd3f..0ba04f2e1b 100644 --- a/typedapi/connector/get/get.go +++ b/typedapi/connector/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a connector. // @@ -82,7 +82,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { // // Get the details about a connector. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -301,6 +301,15 @@ func (r *Get) _connectorid(connectorid string) *Get { return r } +// IncludeDeleted A flag to indicate if the desired connector should be fetched, even if it was +// soft-deleted. +// API name: include_deleted +func (r *Get) IncludeDeleted(includedeleted bool) *Get { + r.values.Set("include_deleted", strconv.FormatBool(includedeleted)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/connector/get/response.go b/typedapi/connector/get/response.go index 8965b0e8a3..98c6b19e1a 100644 --- a/typedapi/connector/get/response.go +++ b/typedapi/connector/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -35,12 +35,13 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/get/ConnectorGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/get/ConnectorGetResponse.ts#L22-L24 type Response struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` Configuration types.ConnectorConfiguration `json:"configuration"` CustomScheduling types.ConnectorCustomScheduling `json:"custom_scheduling"` + Deleted bool `json:"deleted"` Description *string `json:"description,omitempty"` Error *string `json:"error,omitempty"` Features *types.ConnectorFeatures `json:"features,omitempty"` @@ -123,6 +124,20 @@ func (s *Response) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "CustomScheduling", err) } + case "deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deleted", err) + } + s.Deleted = value + case bool: + s.Deleted = v + } + case "description": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/connector/lastsync/last_sync.go b/typedapi/connector/lastsync/last_sync.go index 68331b2daa..6cd55ea776 100644 --- a/typedapi/connector/lastsync/last_sync.go +++ b/typedapi/connector/lastsync/last_sync.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector last sync stats. // @@ -90,7 +90,7 @@ func NewLastSyncFunc(tp elastictransport.Interface) NewLastSync { // Update the fields related to the last sync of a connector. // This action is used for analytics and monitoring. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-last-sync-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync func New(tp elastictransport.Interface) *LastSync { r := &LastSync{ transport: tp, @@ -98,8 +98,6 @@ func New(tp elastictransport.Interface) *LastSync { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -368,6 +366,10 @@ func (r *LastSync) Pretty(pretty bool) *LastSync { // API name: last_access_control_sync_error func (r *LastSync) LastAccessControlSyncError(lastaccesscontrolsyncerror string) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastAccessControlSyncError = &lastaccesscontrolsyncerror @@ -375,21 +377,33 @@ func (r *LastSync) LastAccessControlSyncError(lastaccesscontrolsyncerror string) } // API name: last_access_control_sync_scheduled_at -func (r *LastSync) LastAccessControlSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastAccessControlSyncScheduledAt = datetime +func (r *LastSync) LastAccessControlSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastAccessControlSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_access_control_sync_status func (r *LastSync) 
LastAccessControlSyncStatus(lastaccesscontrolsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastAccessControlSyncStatus = &lastaccesscontrolsyncstatus - return r } // API name: last_deleted_document_count func (r *LastSync) LastDeletedDocumentCount(lastdeleteddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastDeletedDocumentCount = &lastdeleteddocumentcount @@ -397,14 +411,23 @@ func (r *LastSync) LastDeletedDocumentCount(lastdeleteddocumentcount int64) *Las } // API name: last_incremental_sync_scheduled_at -func (r *LastSync) LastIncrementalSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastIncrementalSyncScheduledAt = datetime +func (r *LastSync) LastIncrementalSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastIncrementalSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_indexed_document_count func (r *LastSync) LastIndexedDocumentCount(lastindexeddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastIndexedDocumentCount = &lastindexeddocumentcount @@ -412,14 +435,23 @@ func (r *LastSync) LastIndexedDocumentCount(lastindexeddocumentcount int64) *Las } // API name: last_seen -func (r *LastSync) LastSeen(datetime types.DateTime) *LastSync { - r.req.LastSeen = datetime +func (r *LastSync) LastSeen(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *datetime.DateTimeCaster() return r } // API name: last_sync_error func (r *LastSync) LastSyncError(lastsyncerror string) *LastSync 
{ + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastSyncError = &lastsyncerror @@ -427,32 +459,45 @@ func (r *LastSync) LastSyncError(lastsyncerror string) *LastSync { } // API name: last_sync_scheduled_at -func (r *LastSync) LastSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastSyncScheduledAt = datetime +func (r *LastSync) LastSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_sync_status func (r *LastSync) LastSyncStatus(lastsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastSyncStatus = &lastsyncstatus - return r } // API name: last_synced -func (r *LastSync) LastSynced(datetime types.DateTime) *LastSync { - r.req.LastSynced = datetime +func (r *LastSync) LastSynced(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSynced = *datetime.DateTimeCaster() return r } // API name: sync_cursor -// -// synccursor should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. 
func (r *LastSync) SyncCursor(synccursor any) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := synccursor.(type) { case json.RawMessage: r.req.SyncCursor = casted @@ -466,6 +511,5 @@ func (r *LastSync) SyncCursor(synccursor any) *LastSync { return nil }) } - return r } diff --git a/typedapi/connector/lastsync/request.go b/typedapi/connector/lastsync/request.go index 9394425a3c..06e945be5b 100644 --- a/typedapi/connector/lastsync/request.go +++ b/typedapi/connector/lastsync/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package lastsync @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package lastsync // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/last_sync/ConnectorUpdateLastSyncRequest.ts#L26-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/last_sync/ConnectorUpdateLastSyncRequest.ts#L26-L66 type Request struct { LastAccessControlSyncError *string `json:"last_access_control_sync_error,omitempty"` LastAccessControlSyncScheduledAt types.DateTime `json:"last_access_control_sync_scheduled_at,omitempty"` diff --git a/typedapi/connector/lastsync/response.go b/typedapi/connector/lastsync/response.go index f715270253..ccd75b9d5f 100644 --- a/typedapi/connector/lastsync/response.go +++ b/typedapi/connector/lastsync/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package lastsync @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package lastsync // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/last_sync/ConnectorUpdateLastSyncResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/last_sync/ConnectorUpdateLastSyncResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/list/list.go b/typedapi/connector/list/list.go index d7601d137a..7d0f4f337b 100644 --- a/typedapi/connector/list/list.go +++ b/typedapi/connector/list/list.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get all connectors. // @@ -74,7 +74,7 @@ func NewListFunc(tp elastictransport.Interface) NewList { // // Get information about all connectors. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list func New(tp elastictransport.Interface) *List { r := &List{ transport: tp, @@ -320,6 +320,15 @@ func (r *List) ServiceType(names ...string) *List { return r } +// IncludeDeleted A flag to indicate if the desired connector should be fetched, even if it was +// soft-deleted. 
+// API name: include_deleted +func (r *List) IncludeDeleted(includedeleted bool) *List { + r.values.Set("include_deleted", strconv.FormatBool(includedeleted)) + + return r +} + // Query A wildcard query string that filters connectors with matching name, // description or index name // API name: query diff --git a/typedapi/connector/list/response.go b/typedapi/connector/list/response.go index a6e9e6fef4..bcd9f5c64a 100644 --- a/typedapi/connector/list/response.go +++ b/typedapi/connector/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package list @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/list/ConnectorListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/list/ConnectorListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.Connector `json:"results"` diff --git a/typedapi/connector/post/post.go b/typedapi/connector/post/post.go index 0a1d018531..ed5505ee5a 100644 --- a/typedapi/connector/post/post.go +++ b/typedapi/connector/post/post.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a connector. 
// @@ -91,7 +91,7 @@ func NewPostFunc(tp elastictransport.Interface) NewPost { // Self-managed connectors (Connector clients) are self-managed on your // infrastructure. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put func New(tp elastictransport.Interface) *Post { r := &Post{ transport: tp, @@ -99,8 +99,6 @@ func New(tp elastictransport.Interface) *Post { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -352,6 +350,10 @@ func (r *Post) Pretty(pretty bool) *Post { // API name: description func (r *Post) Description(description string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -360,6 +362,11 @@ func (r *Post) Description(description string) *Post { // API name: index_name func (r *Post) IndexName(indexname string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = &indexname return r @@ -367,6 +374,11 @@ func (r *Post) IndexName(indexname string) *Post { // API name: is_native func (r *Post) IsNative(isnative bool) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = &isnative return r @@ -374,6 +386,10 @@ func (r *Post) IsNative(isnative bool) *Post { // API name: language func (r *Post) Language(language string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Language = &language @@ -382,6 +398,10 @@ func (r *Post) Language(language string) *Post { // API name: name func (r *Post) Name(name string) *Post { + // Initialize the request if it is not already initialized + if r.req 
== nil { + r.req = NewRequest() + } r.req.Name = &name @@ -390,6 +410,10 @@ func (r *Post) Name(name string) *Post { // API name: service_type func (r *Post) ServiceType(servicetype string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ServiceType = &servicetype diff --git a/typedapi/connector/post/request.go b/typedapi/connector/post/request.go index 151692b56b..63cd18482d 100644 --- a/typedapi/connector/post/request.go +++ b/typedapi/connector/post/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package post @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/post/ConnectorPostRequest.ts#L22-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/post/ConnectorPostRequest.ts#L22-L52 type Request struct { Description *string `json:"description,omitempty"` IndexName *string `json:"index_name,omitempty"` diff --git a/typedapi/connector/post/response.go b/typedapi/connector/post/response.go index e12249dd8d..c4ebddf753 100644 --- a/typedapi/connector/post/response.go +++ b/typedapi/connector/post/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package post @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/post/ConnectorPostResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/post/ConnectorPostResponse.ts#L23-L28 type Response struct { Id string `json:"id"` Result result.Result `json:"result"` diff --git a/typedapi/connector/put/put.go b/typedapi/connector/put/put.go index be611a0412..2ac6fc0939 100644 --- a/typedapi/connector/put/put.go +++ b/typedapi/connector/put/put.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update a connector. package put @@ -81,7 +81,7 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { // Create or update a connector. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put func New(tp elastictransport.Interface) *Put { r := &Put{ transport: tp, @@ -89,8 +89,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,6 +361,10 @@ func (r *Put) Pretty(pretty bool) *Put { // API name: description func (r *Put) Description(description string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -371,6 +373,11 @@ func (r *Put) Description(description string) *Put { // API name: index_name func (r *Put) IndexName(indexname string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = &indexname return r @@ -378,6 +385,11 @@ func (r *Put) IndexName(indexname string) *Put { // API name: is_native func (r *Put) IsNative(isnative bool) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = &isnative return r @@ -385,6 +397,10 @@ func (r *Put) IsNative(isnative bool) *Put { // API name: language func (r *Put) Language(language string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Language = &language @@ -393,6 +409,10 @@ func (r *Put) Language(language string) *Put { // API name: name func (r *Put) Name(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name @@ -401,6 +421,10 @@ func (r *Put) Name(name string) *Put { // API name: service_type func (r *Put) ServiceType(servicetype string) *Put { 
+ // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ServiceType = &servicetype diff --git a/typedapi/connector/put/request.go b/typedapi/connector/put/request.go index 5f740b5cf5..0dfd11d714 100644 --- a/typedapi/connector/put/request.go +++ b/typedapi/connector/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/put/ConnectorPutRequest.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/put/ConnectorPutRequest.ts#L22-L58 type Request struct { Description *string `json:"description,omitempty"` IndexName *string `json:"index_name,omitempty"` diff --git a/typedapi/connector/put/response.go b/typedapi/connector/put/response.go index 73efd9ce80..d4551a9ada 100644 --- a/typedapi/connector/put/response.go +++ b/typedapi/connector/put/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/put/ConnectorPutResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/put/ConnectorPutResponse.ts#L23-L28 type Response struct { Id string `json:"id"` Result result.Result `json:"result"` diff --git a/typedapi/connector/secretpost/secret_post.go b/typedapi/connector/secretpost/secret_post.go index f4c7072530..21dc3b356b 100644 --- a/typedapi/connector/secretpost/secret_post.go +++ b/typedapi/connector/secretpost/secret_post.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Creates a secret for a Connector. package secretpost diff --git a/typedapi/connector/syncjobcancel/response.go b/typedapi/connector/syncjobcancel/response.go index 49d46a76ed..0cc5409989 100644 --- a/typedapi/connector/syncjobcancel/response.go +++ b/typedapi/connector/syncjobcancel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjobcancel @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package syncjobcancel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/syncjobcancel/sync_job_cancel.go b/typedapi/connector/syncjobcancel/sync_job_cancel.go index f3fab5630c..a35b196b74 100644 --- a/typedapi/connector/syncjobcancel/sync_job_cancel.go +++ b/typedapi/connector/syncjobcancel/sync_job_cancel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Cancel a connector sync job. // @@ -88,7 +88,7 @@ func NewSyncJobCancelFunc(tp elastictransport.Interface) NewSyncJobCancel { // The connector service is then responsible for setting the status of connector // sync jobs to cancelled. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cancel-connector-sync-job-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel func New(tp elastictransport.Interface) *SyncJobCancel { r := &SyncJobCancel{ transport: tp, diff --git a/typedapi/indices/unfreeze/response.go b/typedapi/connector/syncjobcheckin/response.go similarity index 68% rename from typedapi/indices/unfreeze/response.go rename to typedapi/connector/syncjobcheckin/response.go index c7927ce46f..e6dbf916de 100644 --- a/typedapi/indices/unfreeze/response.go +++ b/typedapi/connector/syncjobcheckin/response.go @@ -16,16 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package unfreeze +package syncjobcheckin -// Response holds the response body struct for the package unfreeze +// Response holds the response body struct for the package syncjobcheckin // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_check_in/SyncJobCheckInResponse.ts#L20-L22 type Response struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` } // NewResponse returns a Response diff --git a/typedapi/connector/syncjobcheckin/sync_job_check_in.go b/typedapi/connector/syncjobcheckin/sync_job_check_in.go new file mode 100644 index 0000000000..1d7466951b --- /dev/null +++ b/typedapi/connector/syncjobcheckin/sync_job_check_in.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobcheckin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobCheckIn struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobCheckIn type alias for index. +type NewSyncJobCheckIn func(connectorsyncjobid string) *SyncJobCheckIn + +// NewSyncJobCheckInFunc returns a new instance of SyncJobCheckIn with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobCheckInFunc(tp elastictransport.Interface) NewSyncJobCheckIn { + return func(connectorsyncjobid string) *SyncJobCheckIn { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in +func New(tp elastictransport.Interface) *SyncJobCheckIn { + r := &SyncJobCheckIn{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobCheckIn) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_check_in") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobCheckIn) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_check_in") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_check_in", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_check_in") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobCheckIn query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobcheckin.Response +func (r SyncJobCheckIn) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SyncJobCheckIn) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobCheckIn query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobCheckIn headers map. 
+func (r *SyncJobCheckIn) Header(key, value string) *SyncJobCheckIn { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job to be checked in. +// API Name: connectorsyncjobid +func (r *SyncJobCheckIn) _connectorsyncjobid(connectorsyncjobid string) *SyncJobCheckIn { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobCheckIn) ErrorTrace(errortrace bool) *SyncJobCheckIn { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobCheckIn) FilterPath(filterpaths ...string) *SyncJobCheckIn { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobCheckIn) Human(human bool) *SyncJobCheckIn { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *SyncJobCheckIn) Pretty(pretty bool) *SyncJobCheckIn { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/connector/syncjobclaim/request.go b/typedapi/connector/syncjobclaim/request.go new file mode 100644 index 0000000000..0d8b083a15 --- /dev/null +++ b/typedapi/connector/syncjobclaim/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjobclaim + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package syncjobclaim +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_claim/SyncJobClaimRequest.ts#L23-L61 +type Request struct { + + // SyncCursor The cursor object from the last incremental sync job. + // This should reference the `sync_cursor` field in the connector state for + // which the job runs. 
+ SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` + // WorkerHostname The host name of the current system that will run the job. + WorkerHostname string `json:"worker_hostname"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobclaim request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/connector/syncjobclaim/response.go b/typedapi/connector/syncjobclaim/response.go new file mode 100644 index 0000000000..f33ffeadd9 --- /dev/null +++ b/typedapi/connector/syncjobclaim/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjobclaim + +// Response holds the response body struct for the package syncjobclaim +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_claim/SyncJobClaimResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/syncjobclaim/sync_job_claim.go b/typedapi/connector/syncjobclaim/sync_job_claim.go new file mode 100644 index 0000000000..e33b091c47 --- /dev/null +++ b/typedapi/connector/syncjobclaim/sync_job_claim.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Claim a connector sync job. +// This action updates the job status to `in_progress` and sets the `last_seen` +// and `started_at` timestamps to the current time. +// Additionally, it can set the `sync_cursor` property for the sync job. 
+// +// This API is not intended for direct connector management by users. +// It supports the implementation of services that utilize the connector +// protocol to communicate with Elasticsearch. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobclaim + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobClaim struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobClaim type alias for index. +type NewSyncJobClaim func(connectorsyncjobid string) *SyncJobClaim + +// NewSyncJobClaimFunc returns a new instance of SyncJobClaim with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobClaimFunc(tp elastictransport.Interface) NewSyncJobClaim { + return func(connectorsyncjobid string) *SyncJobClaim { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Claim a connector sync job. +// This action updates the job status to `in_progress` and sets the `last_seen` +// and `started_at` timestamps to the current time. 
+// Additionally, it can set the `sync_cursor` property for the sync job. +// +// This API is not intended for direct connector management by users. +// It supports the implementation of services that utilize the connector +// protocol to communicate with Elasticsearch. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim +func New(tp elastictransport.Interface) *SyncJobClaim { + r := &SyncJobClaim{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobClaim) Raw(raw io.Reader) *SyncJobClaim { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobClaim) Request(req *Request) *SyncJobClaim { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobClaim) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobClaim: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_claim") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobClaim) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_claim") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_claim", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_claim") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobClaim query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobclaim.Response +func (r SyncJobClaim) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobClaim headers map. +func (r *SyncJobClaim) Header(key, value string) *SyncJobClaim { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobClaim) _connectorsyncjobid(connectorsyncjobid string) *SyncJobClaim { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobClaim) ErrorTrace(errortrace bool) *SyncJobClaim { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SyncJobClaim) FilterPath(filterpaths ...string) *SyncJobClaim { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobClaim) Human(human bool) *SyncJobClaim { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobClaim) Pretty(pretty bool) *SyncJobClaim { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The cursor object from the last incremental sync job. +// This should reference the `sync_cursor` field in the connector state for +// which the job runs. +// API name: sync_cursor +func (r *SyncJobClaim) SyncCursor(synccursor any) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := synccursor.(type) { + case json.RawMessage: + r.req.SyncCursor = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(synccursor) + if err != nil { + return err + } + r.req.SyncCursor = data + return nil + }) + } + return r +} + +// The host name of the current system that will run the job. 
+// API name: worker_hostname +func (r *SyncJobClaim) WorkerHostname(workerhostname string) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WorkerHostname = workerhostname + + return r +} diff --git a/typedapi/connector/syncjobdelete/response.go b/typedapi/connector/syncjobdelete/response.go index d0f86fd8c7..f09d8768cf 100644 --- a/typedapi/connector/syncjobdelete/response.go +++ b/typedapi/connector/syncjobdelete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjobdelete // Response holds the response body struct for the package syncjobdelete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/connector/syncjobdelete/sync_job_delete.go b/typedapi/connector/syncjobdelete/sync_job_delete.go index 6315c80546..00dc86f44d 100644 --- a/typedapi/connector/syncjobdelete/sync_job_delete.go +++ b/typedapi/connector/syncjobdelete/sync_job_delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a connector sync job. // @@ -84,7 +84,7 @@ func NewSyncJobDeleteFunc(tp elastictransport.Interface) NewSyncJobDelete { // Remove a connector sync job and its associated data. // This is a destructive action that is not recoverable. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-sync-job-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete func New(tp elastictransport.Interface) *SyncJobDelete { r := &SyncJobDelete{ transport: tp, diff --git a/typedapi/connector/syncjoberror/request.go b/typedapi/connector/syncjoberror/request.go new file mode 100644 index 0000000000..070cf3fc64 --- /dev/null +++ b/typedapi/connector/syncjoberror/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjoberror + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package syncjoberror +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_error/SyncJobErrorRequest.ts#L23-L52 +type Request struct { + + // Error The error for the connector sync job error field. + Error string `json:"error"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjoberror request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/connector/syncjoberror/response.go b/typedapi/connector/syncjoberror/response.go new file mode 100644 index 0000000000..fdb373c20b --- /dev/null +++ b/typedapi/connector/syncjoberror/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjoberror + +// Response holds the response body struct for the package syncjoberror +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_error/SyncJobErrorResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/syncjoberror/sync_job_error.go b/typedapi/connector/syncjoberror/sync_job_error.go new file mode 100644 index 0000000000..f8fd2d3e9e --- /dev/null +++ b/typedapi/connector/syncjoberror/sync_job_error.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Set a connector sync job error. 
+// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjoberror + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobError struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobError type alias for index. +type NewSyncJobError func(connectorsyncjobid string) *SyncJobError + +// NewSyncJobErrorFunc returns a new instance of SyncJobError with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobErrorFunc(tp elastictransport.Interface) NewSyncJobError { + return func(connectorsyncjobid string) *SyncJobError { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set a connector sync job error. +// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. 
+// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error +func New(tp elastictransport.Interface) *SyncJobError { + r := &SyncJobError{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobError) Raw(raw io.Reader) *SyncJobError { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobError) Request(req *Request) *SyncJobError { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobError) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobError: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_error") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobError) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_error") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_error", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_error") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobError query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjoberror.Response +func (r SyncJobError) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobError headers map. +func (r *SyncJobError) Header(key, value string) *SyncJobError { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier for the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobError) _connectorsyncjobid(connectorsyncjobid string) *SyncJobError { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobError) ErrorTrace(errortrace bool) *SyncJobError { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SyncJobError) FilterPath(filterpaths ...string) *SyncJobError { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobError) Human(human bool) *SyncJobError { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobError) Pretty(pretty bool) *SyncJobError { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The error for the connector sync job error field. +// API name: error +func (r *SyncJobError) Error(error string) *SyncJobError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Error = error + + return r +} diff --git a/typedapi/connector/syncjobget/response.go b/typedapi/connector/syncjobget/response.go index 5507f34ba6..29964dd7f7 100644 --- a/typedapi/connector/syncjobget/response.go +++ b/typedapi/connector/syncjobget/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjobget @@ -36,7 +36,7 @@ import ( // Response holds the response body struct for the package syncjobget // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_get/SyncJobGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_get/SyncJobGetResponse.ts#L22-L24 type Response struct { CancelationRequestedAt types.DateTime `json:"cancelation_requested_at,omitempty"` CanceledAt types.DateTime `json:"canceled_at,omitempty"` diff --git a/typedapi/connector/syncjobget/sync_job_get.go b/typedapi/connector/syncjobget/sync_job_get.go index ddc1fcfe5e..7fc912e898 100644 --- a/typedapi/connector/syncjobget/sync_job_get.go +++ b/typedapi/connector/syncjobget/sync_job_get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a connector sync job. package syncjobget @@ -78,7 +78,7 @@ func NewSyncJobGetFunc(tp elastictransport.Interface) NewSyncJobGet { // Get a connector sync job. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-sync-job-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get func New(tp elastictransport.Interface) *SyncJobGet { r := &SyncJobGet{ transport: tp, diff --git a/typedapi/connector/syncjoblist/response.go b/typedapi/connector/syncjoblist/response.go index b9ed59e636..8914eae6b2 100644 --- a/typedapi/connector/syncjoblist/response.go +++ b/typedapi/connector/syncjoblist/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjoblist @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package syncjoblist // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_list/SyncJobListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_list/SyncJobListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.ConnectorSyncJob `json:"results"` diff --git a/typedapi/connector/syncjoblist/sync_job_list.go b/typedapi/connector/syncjoblist/sync_job_list.go index 80588ce484..bfbe869e9a 100644 --- a/typedapi/connector/syncjoblist/sync_job_list.go +++ b/typedapi/connector/syncjoblist/sync_job_list.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get all connector sync jobs. // @@ -78,7 +78,7 @@ func NewSyncJobListFunc(tp elastictransport.Interface) NewSyncJobList { // Get information about all stored connector sync jobs listed by their creation // date in ascending order. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-sync-jobs-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list func New(tp elastictransport.Interface) *SyncJobList { r := &SyncJobList{ transport: tp, diff --git a/typedapi/connector/syncjobpost/request.go b/typedapi/connector/syncjobpost/request.go index c337177586..2d368f7de3 100644 --- a/typedapi/connector/syncjobpost/request.go +++ b/typedapi/connector/syncjobpost/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjobpost @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package syncjobpost // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_post/SyncJobPostRequest.ts#L23-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_post/SyncJobPostRequest.ts#L23-L51 type Request struct { // Id The id of the associated connector diff --git a/typedapi/connector/syncjobpost/response.go b/typedapi/connector/syncjobpost/response.go index 2f724818e3..fed5c59c0e 100644 --- a/typedapi/connector/syncjobpost/response.go +++ b/typedapi/connector/syncjobpost/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package syncjobpost // Response holds the response body struct for the package syncjobpost // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/sync_job_post/SyncJobPostResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_post/SyncJobPostResponse.ts#L22-L26 type Response struct { Id string `json:"id"` } diff --git a/typedapi/connector/syncjobpost/sync_job_post.go b/typedapi/connector/syncjobpost/sync_job_post.go index a5f366e105..7421d3d4bc 100644 --- a/typedapi/connector/syncjobpost/sync_job_post.go +++ b/typedapi/connector/syncjobpost/sync_job_post.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a connector sync job. // @@ -83,7 +83,7 @@ func NewSyncJobPostFunc(tp elastictransport.Interface) NewSyncJobPost { // Create a connector sync job document in the internal index and initialize its // counters and timestamps with default values. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-sync-job-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post func New(tp elastictransport.Interface) *SyncJobPost { r := &SyncJobPost{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *SyncJobPost { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,9 +342,14 @@ func (r *SyncJobPost) Pretty(pretty bool) *SyncJobPost { return r } -// Id The id of the associated connector +// The id of the associated connector // API name: id func (r *SyncJobPost) Id(id string) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = id return r @@ -354,14 +357,20 @@ func (r *SyncJobPost) Id(id string) *SyncJobPost { // API name: job_type func (r *SyncJobPost) JobType(jobtype syncjobtype.SyncJobType) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.JobType = &jobtype - return r } // API name: trigger_method func (r *SyncJobPost) TriggerMethod(triggermethod syncjobtriggermethod.SyncJobTriggerMethod) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TriggerMethod = &triggermethod - return r } diff --git a/typedapi/connector/syncjobupdatestats/request.go b/typedapi/connector/syncjobupdatestats/request.go new file mode 100644 index 0000000000..8aa3d1bccb --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/request.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjobupdatestats + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_update_stats/SyncJobUpdateStatsRequest.ts#L24-L78 +type Request struct { + + // DeletedDocumentCount The number of documents the sync job deleted. + DeletedDocumentCount int64 `json:"deleted_document_count"` + // IndexedDocumentCount The number of documents the sync job indexed. + IndexedDocumentCount int64 `json:"indexed_document_count"` + // IndexedDocumentVolume The total size of the data (in MiB) the sync job indexed. + IndexedDocumentVolume int64 `json:"indexed_document_volume"` + // LastSeen The timestamp to use in the `last_seen` property for the connector sync job. + LastSeen types.Duration `json:"last_seen,omitempty"` + // Metadata The connector-specific metadata. + Metadata types.Metadata `json:"metadata,omitempty"` + // TotalDocumentCount The total number of documents in the target index after the sync job + // finished. 
+ TotalDocumentCount *int `json:"total_document_count,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobupdatestats request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocumentCount", err) + } + s.DeletedDocumentCount = value + case float64: + f := int64(v) + s.DeletedDocumentCount = f + } + + case "indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentCount", err) + } + s.IndexedDocumentCount = value + case float64: + f := int64(v) + s.IndexedDocumentCount = f + } + + case "indexed_document_volume": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentVolume", err) + } + s.IndexedDocumentVolume = value + case float64: + f := int64(v) + s.IndexedDocumentVolume = f + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + 
case "total_document_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocumentCount", err) + } + s.TotalDocumentCount = &value + case float64: + f := int(v) + s.TotalDocumentCount = &f + } + + } + } + return nil +} diff --git a/typedapi/connector/syncjobupdatestats/response.go b/typedapi/connector/syncjobupdatestats/response.go new file mode 100644 index 0000000000..a24b9ee7a8 --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package syncjobupdatestats + +// Response holds the response body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/sync_job_update_stats/SyncJobUpdateStatsResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go b/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go new file mode 100644 index 0000000000..f26a072ca6 --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go @@ -0,0 +1,459 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. 
+// You can also update `last_seen`. +// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobupdatestats + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobUpdateStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobUpdateStats type alias for index. +type NewSyncJobUpdateStats func(connectorsyncjobid string) *SyncJobUpdateStats + +// NewSyncJobUpdateStatsFunc returns a new instance of SyncJobUpdateStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobUpdateStatsFunc(tp elastictransport.Interface) NewSyncJobUpdateStats { + return func(connectorsyncjobid string) *SyncJobUpdateStats { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. +// You can also update `last_seen`. 
+// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats +func New(tp elastictransport.Interface) *SyncJobUpdateStats { + r := &SyncJobUpdateStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobUpdateStats) Raw(raw io.Reader) *SyncJobUpdateStats { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobUpdateStats) Request(req *Request) *SyncJobUpdateStats { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobUpdateStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobUpdateStats: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobUpdateStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_update_stats") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_update_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_update_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobUpdateStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobupdatestats.Response +func (r SyncJobUpdateStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobUpdateStats headers map. +func (r *SyncJobUpdateStats) Header(key, value string) *SyncJobUpdateStats { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobUpdateStats) _connectorsyncjobid(connectorsyncjobid string) *SyncJobUpdateStats { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobUpdateStats) ErrorTrace(errortrace bool) *SyncJobUpdateStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SyncJobUpdateStats) FilterPath(filterpaths ...string) *SyncJobUpdateStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobUpdateStats) Human(human bool) *SyncJobUpdateStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobUpdateStats) Pretty(pretty bool) *SyncJobUpdateStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The number of documents the sync job deleted. +// API name: deleted_document_count +func (r *SyncJobUpdateStats) DeletedDocumentCount(deleteddocumentcount int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DeletedDocumentCount = deleteddocumentcount + + return r +} + +// The number of documents the sync job indexed. +// API name: indexed_document_count +func (r *SyncJobUpdateStats) IndexedDocumentCount(indexeddocumentcount int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexedDocumentCount = indexeddocumentcount + + return r +} + +// The total size of the data (in MiB) the sync job indexed. 
+// API name: indexed_document_volume +func (r *SyncJobUpdateStats) IndexedDocumentVolume(indexeddocumentvolume int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexedDocumentVolume = indexeddocumentvolume + + return r +} + +// The timestamp to use in the `last_seen` property for the connector sync job. +// API name: last_seen +func (r *SyncJobUpdateStats) LastSeen(duration types.DurationVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *duration.DurationCaster() + + return r +} + +// The connector-specific metadata. +// API name: metadata +func (r *SyncJobUpdateStats) Metadata(metadata types.MetadataVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The total number of documents in the target index after the sync job +// finished. +// API name: total_document_count +func (r *SyncJobUpdateStats) TotalDocumentCount(totaldocumentcount int) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TotalDocumentCount = &totaldocumentcount + + return r +} diff --git a/typedapi/connector/updateactivefiltering/response.go b/typedapi/connector/updateactivefiltering/response.go index 4e2fa26b13..3b47f46a65 100644 --- a/typedapi/connector/updateactivefiltering/response.go +++ b/typedapi/connector/updateactivefiltering/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateactivefiltering @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateactivefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateactivefiltering/update_active_filtering.go b/typedapi/connector/updateactivefiltering/update_active_filtering.go index d2aeec8985..3ed4cb62a2 100644 --- a/typedapi/connector/updateactivefiltering/update_active_filtering.go +++ b/typedapi/connector/updateactivefiltering/update_active_filtering.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Activate the connector draft filter. // @@ -82,7 +82,7 @@ func NewUpdateActiveFilteringFunc(tp elastictransport.Interface) NewUpdateActive // // Activates the valid draft filtering for a connector. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering func New(tp elastictransport.Interface) *UpdateActiveFiltering { r := &UpdateActiveFiltering{ transport: tp, diff --git a/typedapi/connector/updateapikeyid/request.go b/typedapi/connector/updateapikeyid/request.go index 5a40034f3d..95aa02f8cf 100644 --- a/typedapi/connector/updateapikeyid/request.go +++ b/typedapi/connector/updateapikeyid/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateapikeyid @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updateapikeyid // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L53 type Request struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` diff --git a/typedapi/connector/updateapikeyid/response.go b/typedapi/connector/updateapikeyid/response.go index 5823cf092c..d0987872a0 100644 --- a/typedapi/connector/updateapikeyid/response.go +++ b/typedapi/connector/updateapikeyid/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateapikeyid @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateapikeyid // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateapikeyid/update_api_key_id.go b/typedapi/connector/updateapikeyid/update_api_key_id.go index 0bb750bb05..a153d8021c 100644 --- a/typedapi/connector/updateapikeyid/update_api_key_id.go +++ b/typedapi/connector/updateapikeyid/update_api_key_id.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector API key ID. // @@ -97,7 +97,7 @@ func NewUpdateApiKeyIdFunc(tp elastictransport.Interface) NewUpdateApiKeyId { // connectors. // Self-managed connectors (connector clients) do not use this field. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-api-key-id-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id func New(tp elastictransport.Interface) *UpdateApiKeyId { r := &UpdateApiKeyId{ transport: tp, @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *UpdateApiKeyId { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -375,6 +373,10 @@ func (r *UpdateApiKeyId) Pretty(pretty bool) *UpdateApiKeyId { // API name: api_key_id func (r *UpdateApiKeyId) ApiKeyId(apikeyid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ApiKeyId = &apikeyid @@ -383,6 +385,10 @@ func (r *UpdateApiKeyId) ApiKeyId(apikeyid string) *UpdateApiKeyId { // API name: api_key_secret_id func (r *UpdateApiKeyId) ApiKeySecretId(apikeysecretid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ApiKeySecretId = &apikeysecretid diff --git a/typedapi/connector/updateconfiguration/request.go b/typedapi/connector/updateconfiguration/request.go index 2bd4417725..78b47e2004 100644 --- a/typedapi/connector/updateconfiguration/request.go +++ b/typedapi/connector/updateconfiguration/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateconfiguration @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package updateconfiguration // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_configuration/ConnectorUpdateConfigurationRequest.ts#L25-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_configuration/ConnectorUpdateConfigurationRequest.ts#L25-L55 type Request struct { Configuration types.ConnectorConfiguration `json:"configuration,omitempty"` Values map[string]json.RawMessage `json:"values,omitempty"` diff --git a/typedapi/connector/updateconfiguration/response.go b/typedapi/connector/updateconfiguration/response.go index 80e91f141e..1b9d24da82 100644 --- a/typedapi/connector/updateconfiguration/response.go +++ b/typedapi/connector/updateconfiguration/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateconfiguration @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateconfiguration // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateconfiguration/update_configuration.go b/typedapi/connector/updateconfiguration/update_configuration.go index d707fc5e23..4f810872a7 100644 --- a/typedapi/connector/updateconfiguration/update_configuration.go +++ b/typedapi/connector/updateconfiguration/update_configuration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector configuration. // @@ -87,7 +87,7 @@ func NewUpdateConfigurationFunc(tp elastictransport.Interface) NewUpdateConfigur // // Update the configuration field in the connector document. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-configuration-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration func New(tp elastictransport.Interface) *UpdateConfiguration { r := &UpdateConfiguration{ transport: tp, @@ -95,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateConfiguration { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -364,16 +362,42 @@ func (r *UpdateConfiguration) Pretty(pretty bool) *UpdateConfiguration { } // API name: configuration -func (r *UpdateConfiguration) Configuration(connectorconfiguration types.ConnectorConfiguration) *UpdateConfiguration { - r.req.Configuration = connectorconfiguration +func (r *UpdateConfiguration) Configuration(connectorconfiguration types.ConnectorConfigurationVariant) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Configuration = *connectorconfiguration.ConnectorConfigurationCaster() return r } // API name: values func (r *UpdateConfiguration) Values(values map[string]json.RawMessage) *UpdateConfiguration { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Values = values + return r +} + +func (r *UpdateConfiguration) AddValue(key string, value json.RawMessage) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Values == nil { + r.req.Values = make(map[string]json.RawMessage) + } else { + tmp = r.req.Values + } + + tmp[key] = value + r.req.Values = tmp return r } diff --git a/typedapi/connector/updateerror/request.go b/typedapi/connector/updateerror/request.go index 114c3dc9d2..7d36c0425e 100644 --- 
a/typedapi/connector/updateerror/request.go +++ b/typedapi/connector/updateerror/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateerror @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package updateerror // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L54 type Request struct { Error any `json:"error"` } diff --git a/typedapi/connector/updateerror/response.go b/typedapi/connector/updateerror/response.go index 11688f8c66..1c4269b37e 100644 --- a/typedapi/connector/updateerror/response.go +++ b/typedapi/connector/updateerror/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateerror @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateerror // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateerror/update_error.go b/typedapi/connector/updateerror/update_error.go index 9bba5cef82..7793ba21dc 100644 --- a/typedapi/connector/updateerror/update_error.go +++ b/typedapi/connector/updateerror/update_error.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector error field. // @@ -95,7 +95,7 @@ func NewUpdateErrorFunc(tp elastictransport.Interface) NewUpdateError { // Otherwise, if the error is reset to null, the connector status is updated to // connected. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-error-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error func New(tp elastictransport.Interface) *UpdateError { r := &UpdateError{ transport: tp, @@ -103,8 +103,6 @@ func New(tp elastictransport.Interface) *UpdateError { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -373,6 +371,11 @@ func (r *UpdateError) Pretty(pretty bool) *UpdateError { // API name: error func (r *UpdateError) Error(error any) *UpdateError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Error = error return r diff --git a/typedapi/connector/updatefeatures/request.go b/typedapi/connector/updatefeatures/request.go new file mode 100644 index 0000000000..8cdbf8d4d3 --- /dev/null +++ b/typedapi/connector/updatefeatures/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatefeatures + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package updatefeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_features/ConnectorUpdateFeaturesRequest.ts#L23-L61 +type Request struct { + Features types.ConnectorFeatures `json:"features"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefeatures request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/connector/updatefeatures/response.go b/typedapi/connector/updatefeatures/response.go new file mode 100644 index 0000000000..ae2693940e --- /dev/null +++ b/typedapi/connector/updatefeatures/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatefeatures + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatefeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_features/ConnectorUpdateFeaturesResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/updatefeatures/update_features.go b/typedapi/connector/updatefeatures/update_features.go new file mode 100644 index 0000000000..f0a0baa4e7 --- /dev/null +++ b/typedapi/connector/updatefeatures/update_features.go @@ -0,0 +1,400 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package updatefeatures + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFeatures struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFeatures type alias for index. +type NewUpdateFeatures func(connectorid string) *UpdateFeatures + +// NewUpdateFeaturesFunc returns a new instance of UpdateFeatures with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewUpdateFeaturesFunc(tp elastictransport.Interface) NewUpdateFeatures { + return func(connectorid string) *UpdateFeatures { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features +func New(tp elastictransport.Interface) *UpdateFeatures { + r := &UpdateFeatures{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFeatures) Raw(raw io.Reader) *UpdateFeatures { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFeatures) Request(req *Request) *UpdateFeatures { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateFeatures) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFeatures: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_features") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateFeatures) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_features") + if reader := instrument.RecordRequestBody(ctx, "connector.update_features", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_features") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFeatures query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefeatures.Response +func (r UpdateFeatures) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFeatures headers map. +func (r *UpdateFeatures) Header(key, value string) *UpdateFeatures { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated. +// API Name: connectorid +func (r *UpdateFeatures) _connectorid(connectorid string) *UpdateFeatures { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateFeatures) ErrorTrace(errortrace bool) *UpdateFeatures { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateFeatures) FilterPath(filterpaths ...string) *UpdateFeatures { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFeatures) Human(human bool) *UpdateFeatures { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *UpdateFeatures) Pretty(pretty bool) *UpdateFeatures { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: features +func (r *UpdateFeatures) Features(features types.ConnectorFeaturesVariant) *UpdateFeatures { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Features = *features.ConnectorFeaturesCaster() + + return r +} diff --git a/typedapi/connector/updatefiltering/request.go b/typedapi/connector/updatefiltering/request.go index dde3dacfe5..65cf875463 100644 --- a/typedapi/connector/updatefiltering/request.go +++ b/typedapi/connector/updatefiltering/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefiltering @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L60 type Request struct { AdvancedSnippet *types.FilteringAdvancedSnippet `json:"advanced_snippet,omitempty"` Filtering []types.FilteringConfig `json:"filtering,omitempty"` diff --git a/typedapi/connector/updatefiltering/response.go b/typedapi/connector/updatefiltering/response.go index 0b260e6d80..879548cfda 100644 --- a/typedapi/connector/updatefiltering/response.go +++ b/typedapi/connector/updatefiltering/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefiltering @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatefiltering/update_filtering.go b/typedapi/connector/updatefiltering/update_filtering.go index 14c95ef276..18c70c84ac 100644 --- a/typedapi/connector/updatefiltering/update_filtering.go +++ b/typedapi/connector/updatefiltering/update_filtering.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector filtering. // @@ -97,7 +97,7 @@ func NewUpdateFilteringFunc(tp elastictransport.Interface) NewUpdateFiltering { // The filtering property is used to configure sync rules (both basic and // advanced) for a connector. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering func New(tp elastictransport.Interface) *UpdateFiltering { r := &UpdateFiltering{ transport: tp, @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *UpdateFiltering { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,23 +372,41 @@ func (r *UpdateFiltering) Pretty(pretty bool) *UpdateFiltering { } // API name: advanced_snippet -func (r *UpdateFiltering) AdvancedSnippet(advancedsnippet *types.FilteringAdvancedSnippet) *UpdateFiltering { +func (r *UpdateFiltering) AdvancedSnippet(advancedsnippet types.FilteringAdvancedSnippetVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AdvancedSnippet = advancedsnippet + r.req.AdvancedSnippet = advancedsnippet.FilteringAdvancedSnippetCaster() return r } // API name: filtering -func (r *UpdateFiltering) Filtering(filterings ...types.FilteringConfig) *UpdateFiltering { - r.req.Filtering = filterings +func (r *UpdateFiltering) Filtering(filterings ...types.FilteringConfigVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filterings { + + r.req.Filtering = append(r.req.Filtering, *v.FilteringConfigCaster()) + } return r } // API name: rules -func (r *UpdateFiltering) Rules(rules ...types.FilteringRule) *UpdateFiltering { - r.req.Rules = rules +func (r *UpdateFiltering) Rules(rules ...types.FilteringRuleVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range rules { + r.req.Rules = append(r.req.Rules, 
*v.FilteringRuleCaster()) + + } return r } diff --git a/typedapi/connector/updatefilteringvalidation/request.go b/typedapi/connector/updatefilteringvalidation/request.go index 47ee8cb2be..b39ee19727 100644 --- a/typedapi/connector/updatefilteringvalidation/request.go +++ b/typedapi/connector/updatefilteringvalidation/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefilteringvalidation @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatefilteringvalidation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L48 type Request struct { Validation types.FilteringRulesValidation `json:"validation"` } diff --git a/typedapi/connector/updatefilteringvalidation/response.go b/typedapi/connector/updatefilteringvalidation/response.go index acaf4accbe..034780d603 100644 --- a/typedapi/connector/updatefilteringvalidation/response.go +++ b/typedapi/connector/updatefilteringvalidation/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefilteringvalidation @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatefilteringvalidation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go b/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go index e06d5c59e4..8c8b569cdf 100644 --- a/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go +++ b/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector draft filtering validation. 
// @@ -95,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateFilteringValidation { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -366,9 +364,13 @@ func (r *UpdateFilteringValidation) Pretty(pretty bool) *UpdateFilteringValidati } // API name: validation -func (r *UpdateFilteringValidation) Validation(validation *types.FilteringRulesValidation) *UpdateFilteringValidation { +func (r *UpdateFilteringValidation) Validation(validation types.FilteringRulesValidationVariant) *UpdateFilteringValidation { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Validation = *validation + r.req.Validation = *validation.FilteringRulesValidationCaster() return r } diff --git a/typedapi/connector/updateindexname/request.go b/typedapi/connector/updateindexname/request.go index 4355165e58..72f10d6b93 100644 --- a/typedapi/connector/updateindexname/request.go +++ b/typedapi/connector/updateindexname/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateindexname @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package updateindexname // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_index_name/ConnectorUpdateIndexNameRequest.ts#L23-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_index_name/ConnectorUpdateIndexNameRequest.ts#L23-L51 type Request struct { IndexName any `json:"index_name"` } diff --git a/typedapi/connector/updateindexname/response.go b/typedapi/connector/updateindexname/response.go index 1dcf0a51be..efc9313754 100644 --- a/typedapi/connector/updateindexname/response.go +++ b/typedapi/connector/updateindexname/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateindexname @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateindexname // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_index_name/ConnectorUpdateIndexNameResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_index_name/ConnectorUpdateIndexNameResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateindexname/update_index_name.go b/typedapi/connector/updateindexname/update_index_name.go index 8f5fec0c59..fc41b045bd 100644 --- a/typedapi/connector/updateindexname/update_index_name.go +++ b/typedapi/connector/updateindexname/update_index_name.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector index name. // @@ -89,7 +89,7 @@ func NewUpdateIndexNameFunc(tp elastictransport.Interface) NewUpdateIndexName { // Update the `index_name` field of a connector, specifying the index where the // data ingested by the connector is stored. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-index-name-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name func New(tp elastictransport.Interface) *UpdateIndexName { r := &UpdateIndexName{ transport: tp, @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *UpdateIndexName { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,6 +365,11 @@ func (r *UpdateIndexName) Pretty(pretty bool) *UpdateIndexName { // API name: index_name func (r *UpdateIndexName) IndexName(indexname any) *UpdateIndexName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = indexname return r diff --git a/typedapi/connector/updatename/request.go b/typedapi/connector/updatename/request.go index b964d378f7..9f3099d08e 100644 --- a/typedapi/connector/updatename/request.go +++ b/typedapi/connector/updatename/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatename @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatename // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_name/ConnectorUpdateNameRequest.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_name/ConnectorUpdateNameRequest.ts#L22-L49 type Request struct { Description *string `json:"description,omitempty"` Name *string `json:"name,omitempty"` diff --git a/typedapi/connector/updatename/response.go b/typedapi/connector/updatename/response.go index 11724495ca..72e2ead1f3 100644 --- a/typedapi/connector/updatename/response.go +++ b/typedapi/connector/updatename/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatename @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatename // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_name/ConnectorUpdateNameResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_name/ConnectorUpdateNameResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatename/update_name.go b/typedapi/connector/updatename/update_name.go index c02cdf7d4c..694d2b5427 100644 --- a/typedapi/connector/updatename/update_name.go +++ b/typedapi/connector/updatename/update_name.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector name and description. package updatename @@ -83,7 +83,7 @@ func NewUpdateNameFunc(tp elastictransport.Interface) NewUpdateName { // Update the connector name and description. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-name-description-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name func New(tp elastictransport.Interface) *UpdateName { r := &UpdateName{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateName { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,10 @@ func (r *UpdateName) Pretty(pretty bool) *UpdateName { // API name: description func (r *UpdateName) Description(description string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -369,6 +371,10 @@ func (r *UpdateName) Description(description string) *UpdateName { // API name: name func (r *UpdateName) Name(name string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name diff --git a/typedapi/connector/updatenative/request.go b/typedapi/connector/updatenative/request.go index 8dc873e09c..5ce8c403ae 100644 --- a/typedapi/connector/updatenative/request.go +++ b/typedapi/connector/updatenative/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatenative @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatenative // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_native/ConnectorUpdateNativeRequest.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_native/ConnectorUpdateNativeRequest.ts#L22-L48 type Request struct { IsNative bool `json:"is_native"` } diff --git a/typedapi/connector/updatenative/response.go b/typedapi/connector/updatenative/response.go index 7ac4290c55..4e569f123b 100644 --- a/typedapi/connector/updatenative/response.go +++ b/typedapi/connector/updatenative/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatenative @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatenative // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_native/ConnectorUpdateNativeResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_native/ConnectorUpdateNativeResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatenative/update_native.go b/typedapi/connector/updatenative/update_native.go index d0a2198489..56ceacfe33 100644 --- a/typedapi/connector/updatenative/update_native.go +++ b/typedapi/connector/updatenative/update_native.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector is_native flag. 
package updatenative @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateNative { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,11 @@ func (r *UpdateNative) Pretty(pretty bool) *UpdateNative { // API name: is_native func (r *UpdateNative) IsNative(isnative bool) *UpdateNative { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = isnative return r diff --git a/typedapi/connector/updatepipeline/request.go b/typedapi/connector/updatepipeline/request.go index 844fc8ff46..560020f297 100644 --- a/typedapi/connector/updatepipeline/request.go +++ b/typedapi/connector/updatepipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatepipeline @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_pipeline/ConnectorUpdatePipelineRequest.ts#L23-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_pipeline/ConnectorUpdatePipelineRequest.ts#L23-L52 type Request struct { Pipeline types.IngestPipelineParams `json:"pipeline"` } diff --git a/typedapi/connector/updatepipeline/response.go b/typedapi/connector/updatepipeline/response.go index 79a7875979..ba77a2b74e 100644 --- a/typedapi/connector/updatepipeline/response.go +++ b/typedapi/connector/updatepipeline/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatepipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_pipeline/ConnectorUpdatePipelineResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_pipeline/ConnectorUpdatePipelineResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatepipeline/update_pipeline.go b/typedapi/connector/updatepipeline/update_pipeline.go index cb862598cc..6265e0b215 100644 --- a/typedapi/connector/updatepipeline/update_pipeline.go +++ b/typedapi/connector/updatepipeline/update_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector pipeline. // @@ -89,7 +89,7 @@ func NewUpdatePipelineFunc(tp elastictransport.Interface) NewUpdatePipeline { // When you create a new connector, the configuration of an ingest pipeline is // populated with default settings. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-pipeline-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline func New(tp elastictransport.Interface) *UpdatePipeline { r := &UpdatePipeline{ transport: tp, @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *UpdatePipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -366,9 +364,13 @@ func (r *UpdatePipeline) Pretty(pretty bool) *UpdatePipeline { } // API name: pipeline -func (r *UpdatePipeline) Pipeline(pipeline *types.IngestPipelineParams) *UpdatePipeline { +func (r *UpdatePipeline) Pipeline(pipeline types.IngestPipelineParamsVariant) *UpdatePipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pipeline = *pipeline + r.req.Pipeline = *pipeline.IngestPipelineParamsCaster() return r } diff --git a/typedapi/connector/updatescheduling/request.go b/typedapi/connector/updatescheduling/request.go index 9743ff2de8..7ac876f64a 100644 --- a/typedapi/connector/updatescheduling/request.go +++ b/typedapi/connector/updatescheduling/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatescheduling @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatescheduling // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_scheduling/ConnectorUpdateSchedulingRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_scheduling/ConnectorUpdateSchedulingRequest.ts#L23-L50 type Request struct { Scheduling types.SchedulingConfiguration `json:"scheduling"` } diff --git a/typedapi/connector/updatescheduling/response.go b/typedapi/connector/updatescheduling/response.go index 303e0032d2..d442cb6dd4 100644 --- a/typedapi/connector/updatescheduling/response.go +++ b/typedapi/connector/updatescheduling/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatescheduling @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatescheduling // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_scheduling/ConnectorUpdateSchedulingResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_scheduling/ConnectorUpdateSchedulingResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatescheduling/update_scheduling.go b/typedapi/connector/updatescheduling/update_scheduling.go index 7ea04ccea0..8c25a2b025 100644 --- a/typedapi/connector/updatescheduling/update_scheduling.go +++ b/typedapi/connector/updatescheduling/update_scheduling.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector scheduling. package updatescheduling @@ -83,7 +83,7 @@ func NewUpdateSchedulingFunc(tp elastictransport.Interface) NewUpdateScheduling // Update the connector scheduling. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-scheduling-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling func New(tp elastictransport.Interface) *UpdateScheduling { r := &UpdateScheduling{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateScheduling { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,9 +358,13 @@ func (r *UpdateScheduling) Pretty(pretty bool) *UpdateScheduling { } // API name: scheduling -func (r *UpdateScheduling) Scheduling(scheduling *types.SchedulingConfiguration) *UpdateScheduling { +func (r *UpdateScheduling) Scheduling(scheduling types.SchedulingConfigurationVariant) *UpdateScheduling { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Scheduling = *scheduling + r.req.Scheduling = *scheduling.SchedulingConfigurationCaster() return r } diff --git a/typedapi/connector/updateservicetype/request.go b/typedapi/connector/updateservicetype/request.go index 937af93678..d4119a07d0 100644 --- a/typedapi/connector/updateservicetype/request.go +++ b/typedapi/connector/updateservicetype/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateservicetype @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updateservicetype // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_service_type/ConnectorUpdateServiceTypeRequest.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_service_type/ConnectorUpdateServiceTypeRequest.ts#L22-L48 type Request struct { ServiceType string `json:"service_type"` } diff --git a/typedapi/connector/updateservicetype/response.go b/typedapi/connector/updateservicetype/response.go index d3d116afc9..7bb47eebda 100644 --- a/typedapi/connector/updateservicetype/response.go +++ b/typedapi/connector/updateservicetype/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateservicetype @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateservicetype // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateservicetype/update_service_type.go b/typedapi/connector/updateservicetype/update_service_type.go index c73caf722d..655cf7dfe8 100644 --- a/typedapi/connector/updateservicetype/update_service_type.go +++ b/typedapi/connector/updateservicetype/update_service_type.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector service type. package updateservicetype @@ -83,7 +83,7 @@ func NewUpdateServiceTypeFunc(tp elastictransport.Interface) NewUpdateServiceTyp // Update the connector service type. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-service-type-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type func New(tp elastictransport.Interface) *UpdateServiceType { r := &UpdateServiceType{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateServiceType { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,10 @@ func (r *UpdateServiceType) Pretty(pretty bool) *UpdateServiceType { // API name: service_type func (r *UpdateServiceType) ServiceType(servicetype string) *UpdateServiceType { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ServiceType = servicetype diff --git a/typedapi/connector/updatestatus/request.go b/typedapi/connector/updatestatus/request.go index a4eea9558f..129506f29a 100644 --- a/typedapi/connector/updatestatus/request.go +++ b/typedapi/connector/updatestatus/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatestatus @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatestatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L49 type Request struct { Status connectorstatus.ConnectorStatus `json:"status"` } diff --git a/typedapi/connector/updatestatus/response.go b/typedapi/connector/updatestatus/response.go index 0873da2d10..f1a397ccaa 100644 --- a/typedapi/connector/updatestatus/response.go +++ b/typedapi/connector/updatestatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatestatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatestatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatestatus/update_status.go b/typedapi/connector/updatestatus/update_status.go index e312d87375..c926890bf1 100644 --- a/typedapi/connector/updatestatus/update_status.go +++ b/typedapi/connector/updatestatus/update_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update the connector status. package updatestatus @@ -84,7 +84,7 @@ func NewUpdateStatusFunc(tp elastictransport.Interface) NewUpdateStatus { // Update the connector status. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-status-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status func New(tp elastictransport.Interface) *UpdateStatus { r := &UpdateStatus{ transport: tp, @@ -92,8 +92,6 @@ func New(tp elastictransport.Interface) *UpdateStatus { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,7 +360,10 @@ func (r *UpdateStatus) Pretty(pretty bool) *UpdateStatus { // API name: status func (r *UpdateStatus) Status(status connectorstatus.ConnectorStatus) *UpdateStatus { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Status = status - return r } diff --git a/typedapi/core/bulk/bulk.go b/typedapi/core/bulk/bulk.go index 1543f2f554..547c4b0fad 100644 --- a/typedapi/core/bulk/bulk.go +++ b/typedapi/core/bulk/bulk.go @@ -16,11 +16,165 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Bulk index or delete documents. -// Performs multiple indexing or delete operations in a single API call. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. // This reduces overhead and can greatly increase indexing speed. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. 
+// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. +// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. +// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. +// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). 
+// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. +// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. +// +// If you provide a target in the request path, it is used for any actions that +// don't explicitly specify an `_index` argument. +// +// A note on the format: the idea here is to make processing as fast as +// possible. +// As some of the actions are redirected to other shards on other nodes, only +// `action_meta_data` is parsed on the receiving node side. +// +// Client libraries using this protocol should try and strive to do something +// similar on the client side, and reduce buffering as much as possible. +// +// There is no "correct" number of actions to perform in a single bulk request. +// Experiment with different settings to find the optimal size for your +// particular workload. +// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by +// default so clients must ensure that no request exceeds this size. +// It is not possible to index a single document that exceeds the size limit, so +// you must pre-process any such documents into smaller pieces before sending +// them to Elasticsearch. +// For instance, split documents into pages or chapters before indexing them, or +// store raw binary data in a system outside Elasticsearch and replace the raw +// data with a link to the external system in the documents that you send to +// Elasticsearch. 
+//
+// **Client support for bulk requests**
+//
+// Some of the officially supported clients provide helpers to assist with bulk
+// requests and reindexing:
+//
+// * Go: Check out `esutil.BulkIndexer`
+// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+// `Search::Elasticsearch::Client::5_0::Scroll`
+// * Python: Check out `elasticsearch.helpers.*`
+// * JavaScript: Check out `client.helpers.*`
+// * .NET: Check out `BulkAllObservable`
+// * PHP: Check out bulk indexing.
+//
+// **Submitting bulk requests with cURL**
+//
+// If you're providing text file input to `curl`, you must use the
+// `--data-binary` flag instead of plain `-d`.
+// The latter doesn't preserve newlines. For example:
+//
+// ```
+// $ cat requests
+// { "index" : { "_index" : "test", "_id" : "1" } }
+// { "field1" : "value1" }
+// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+// --data-binary "@requests"; echo
+// {"took":7, "errors": false,
+// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+// ```
+//
+// **Optimistic concurrency control**
+//
+// Each `index` and `delete` action within a bulk API call may include the
+// `if_seq_no` and `if_primary_term` parameters in their respective action and
+// meta data lines.
+// The `if_seq_no` and `if_primary_term` parameters control how operations are
+// run, based on the last modification to existing documents. See Optimistic
+// concurrency control for more details.
+//
+// **Versioning**
+//
+// Each bulk item can include the version value using the `version` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_version` mapping.
+// It also supports the `version_type`.
+//
+// **Routing**
+//
+// Each bulk item can include the routing value using the `routing` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_routing` mapping.
+// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. +// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. +// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. package bulk import ( @@ -83,10 +237,164 @@ func NewBulkFunc(tp elastictransport.Interface) NewBulk { } // Bulk index or delete documents. -// Performs multiple indexing or delete operations in a single API call. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. // This reduces overhead and can greatly increase indexing speed. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. +// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. +// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. 
+// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. +// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). +// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. +// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. 
+//
+// If you provide a target in the request path, it is used for any actions that
+// don't explicitly specify an `_index` argument.
+//
+// A note on the format: the idea here is to make processing as fast as
+// possible.
+// As some of the actions are redirected to other shards on other nodes, only
+// `action_meta_data` is parsed on the receiving node side.
+//
+// Client libraries using this protocol should try and strive to do something
+// similar on the client side, and reduce buffering as much as possible.
+//
+// There is no "correct" number of actions to perform in a single bulk request.
+// Experiment with different settings to find the optimal size for your
+// particular workload.
+// Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by
+// default so clients must ensure that no request exceeds this size.
+// It is not possible to index a single document that exceeds the size limit, so
+// you must pre-process any such documents into smaller pieces before sending
+// them to Elasticsearch.
+// For instance, split documents into pages or chapters before indexing them, or
+// store raw binary data in a system outside Elasticsearch and replace the raw
+// data with a link to the external system in the documents that you send to
+// Elasticsearch.
+//
+// **Client support for bulk requests**
+//
+// Some of the officially supported clients provide helpers to assist with bulk
+// requests and reindexing:
+//
+// * Go: Check out `esutil.BulkIndexer`
+// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+// `Search::Elasticsearch::Client::5_0::Scroll`
+// * Python: Check out `elasticsearch.helpers.*`
+// * JavaScript: Check out `client.helpers.*`
+// * .NET: Check out `BulkAllObservable`
+// * PHP: Check out bulk indexing.
+//
+// **Submitting bulk requests with cURL**
+//
+// If you're providing text file input to `curl`, you must use the
+// `--data-binary` flag instead of plain `-d`.
+// The latter doesn't preserve newlines. For example:
+//
+// ```
+// $ cat requests
+// { "index" : { "_index" : "test", "_id" : "1" } }
+// { "field1" : "value1" }
+// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+// --data-binary "@requests"; echo
+// {"took":7, "errors": false,
+// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+// ```
+//
+// **Optimistic concurrency control**
+//
+// Each `index` and `delete` action within a bulk API call may include the
+// `if_seq_no` and `if_primary_term` parameters in their respective action and
+// meta data lines.
+// The `if_seq_no` and `if_primary_term` parameters control how operations are
+// run, based on the last modification to existing documents. See Optimistic
+// concurrency control for more details.
+//
+// **Versioning**
+//
+// Each bulk item can include the version value using the `version` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_version` mapping.
+// It also supports the `version_type`.
+//
+// **Routing**
+//
+// Each bulk item can include the routing value using the `routing` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_routing` mapping.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Wait for active shards**
+//
+// When making bulk calls, you can set the `wait_for_active_shards` parameter to
+// require a minimum number of shard copies to be active before starting to
+// process the bulk request.
+//
+// **Refresh**
+//
+// Control when the changes made by this request are visible to search.
+//
+// NOTE: Only the shards that receive the bulk request will be affected by
+// refresh.
+// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk func New(tp elastictransport.Interface) *Bulk { r := &Bulk{ transport: tp, @@ -315,7 +623,8 @@ func (r *Bulk) Header(key, value string) *Bulk { return r } -// Index Name of the data stream, index, or index alias to perform bulk actions on. +// Index The name of the data stream, index, or index alias to perform bulk actions +// on. // API Name: index func (r *Bulk) Index(index string) *Bulk { r.paramSet |= indexMask @@ -324,10 +633,28 @@ func (r *Bulk) Index(index string) *Bulk { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. -// If the index has a default ingest pipeline specified, then setting the value -// to `_none` disables the default ingest pipeline for this request. -// If a final pipeline is configured it will always run, regardless of the value +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Bulk) IncludeSourceOnError(includesourceonerror bool) *Bulk { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// ListExecutedPipelines If `true`, the response will include the ingest pipelines that were run for +// each index or create. +// API name: list_executed_pipelines +func (r *Bulk) ListExecutedPipelines(listexecutedpipelines bool) *Bulk { + r.values.Set("list_executed_pipelines", strconv.FormatBool(listexecutedpipelines)) + + return r +} + +// Pipeline The pipeline identifier to use to preprocess incoming documents. 
+// If the index has a default ingest pipeline specified, setting the value to +// `_none` turns off the default ingest pipeline for this request. +// If a final pipeline is configured, it will always run regardless of the value // of this parameter. // API name: pipeline func (r *Bulk) Pipeline(pipeline string) *Bulk { @@ -337,8 +664,9 @@ func (r *Bulk) Pipeline(pipeline string) *Bulk { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. +// visible to search. +// If `wait_for`, wait for a refresh to make this operation visible to search. +// If `false`, do nothing with refreshes. // Valid values: `true`, `false`, `wait_for`. // API name: refresh func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { @@ -347,7 +675,7 @@ func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Bulk) Routing(routing string) *Bulk { r.values.Set("routing", routing) @@ -355,8 +683,8 @@ func (r *Bulk) Routing(routing string) *Bulk { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or +// contains a list of fields to return. // API name: _source func (r *Bulk) Source_(sourceconfigparam string) *Bulk { r.values.Set("_source", sourceconfigparam) @@ -365,6 +693,9 @@ func (r *Bulk) Source_(sourceconfigparam string) *Bulk { } // SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. 
+// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -373,6 +704,10 @@ func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -380,8 +715,11 @@ func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { return r } -// Timeout Period each action waits for the following operations: automatic index -// creation, dynamic mapping updates, waiting for active shards. +// Timeout The period each action waits for the following operations: automatic index +// creation, dynamic mapping updates, and waiting for active shards. +// The default is `1m` (one minute), which guarantees Elasticsearch waits for at +// least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Bulk) Timeout(duration string) *Bulk { r.values.Set("timeout", duration) @@ -391,8 +729,9 @@ func (r *Bulk) Timeout(duration string) *Bulk { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the +// Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The default is `1`, which waits for each primary shard to be active. 
// API name: wait_for_active_shards func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -400,7 +739,7 @@ func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { return r } -// RequireAlias If `true`, the request’s actions must target an index alias. +// RequireAlias If `true`, the request's actions must target an index alias. // API name: require_alias func (r *Bulk) RequireAlias(requirealias bool) *Bulk { r.values.Set("require_alias", strconv.FormatBool(requirealias)) @@ -408,6 +747,15 @@ func (r *Bulk) RequireAlias(requirealias bool) *Bulk { return r } +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Bulk) RequireDataStream(requiredatastream bool) *Bulk { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/core/bulk/request.go b/typedapi/core/bulk/request.go index 57cc8f0845..4dce0c1bca 100644 --- a/typedapi/core/bulk/request.go +++ b/typedapi/core/bulk/request.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulk // Request holds the request body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/BulkRequest.ts#L32-L105 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/BulkRequest.ts#L32-L247 type Request = []any diff --git a/typedapi/core/bulk/response.go b/typedapi/core/bulk/response.go index beb90d07a3..9fd28245ea 100644 --- a/typedapi/core/bulk/response.go +++ b/typedapi/core/bulk/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulk @@ -27,12 +27,18 @@ import ( // Response holds the response body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/BulkResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/BulkResponse.ts#L24-L45 type Response struct { - Errors bool `json:"errors"` - IngestTook *int64 `json:"ingest_took,omitempty"` - Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` - Took int64 `json:"took"` + + // Errors If `true`, one or more of the operations in the bulk request did not complete + // successfully. 
+ Errors bool `json:"errors"` + IngestTook *int64 `json:"ingest_took,omitempty"` + // Items The result of each operation in the bulk request, in the order they were + // submitted. + Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` + // Took The length of time, in milliseconds, it took to process the bulk request. + Took int64 `json:"took"` } // NewResponse returns a Response diff --git a/typedapi/core/clearscroll/clear_scroll.go b/typedapi/core/clearscroll/clear_scroll.go index 2a43a4002c..c22be7e936 100644 --- a/typedapi/core/clearscroll/clear_scroll.go +++ b/typedapi/core/clearscroll/clear_scroll.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear a scrolling search. -// // Clear the search context and results for a scrolling search. package clearscroll @@ -78,10 +77,9 @@ func NewClearScrollFunc(tp elastictransport.Interface) NewClearScroll { } // Clear a scrolling search. -// // Clear the search context and results for a scrolling search. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll func New(tp elastictransport.Interface) *ClearScroll { r := &ClearScroll{ transport: tp, @@ -89,8 +87,6 @@ func New(tp elastictransport.Interface) *ClearScroll { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -378,10 +374,15 @@ func (r *ClearScroll) Pretty(pretty bool) *ClearScroll { return r } -// ScrollId Scroll IDs to clear. +// The scroll IDs to clear. // To clear all scroll IDs, use `_all`. 
// API name: scroll_id func (r *ClearScroll) ScrollId(scrollids ...string) *ClearScroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollId = scrollids return r diff --git a/typedapi/core/clearscroll/request.go b/typedapi/core/clearscroll/request.go index 176d435913..7bc6fa608c 100644 --- a/typedapi/core/clearscroll/request.go +++ b/typedapi/core/clearscroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearscroll @@ -30,10 +30,10 @@ import ( // Request holds the request body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L61 type Request struct { - // ScrollId Scroll IDs to clear. + // ScrollId The scroll IDs to clear. // To clear all scroll IDs, use `_all`. ScrollId []string `json:"scroll_id,omitempty"` } diff --git a/typedapi/core/clearscroll/response.go b/typedapi/core/clearscroll/response.go index 42223e22d9..06ec2e7a2b 100644 --- a/typedapi/core/clearscroll/response.go +++ b/typedapi/core/clearscroll/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearscroll // Response holds the response body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L43 type Response struct { - NumFreed int `json:"num_freed"` + + // NumFreed The number of scrolling search requests cleared. + NumFreed int `json:"num_freed"` + // Succeeded If `true`, the request succeeded. + // This does not indicate whether any scrolling search requests were cleared. Succeeded bool `json:"succeeded"` } diff --git a/typedapi/core/closepointintime/close_point_in_time.go b/typedapi/core/closepointintime/close_point_in_time.go index 17621a2c0c..60974c0ab1 100644 --- a/typedapi/core/closepointintime/close_point_in_time.go +++ b/typedapi/core/closepointintime/close_point_in_time.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Close a point in time. -// // A point in time must be opened explicitly before being used in search // requests. // The `keep_alive` parameter tells Elasticsearch how long it should persist. @@ -82,7 +81,6 @@ func NewClosePointInTimeFunc(tp elastictransport.Interface) NewClosePointInTime } // Close a point in time. -// // A point in time must be opened explicitly before being used in search // requests. 
// The `keep_alive` parameter tells Elasticsearch how long it should persist. @@ -91,7 +89,7 @@ func NewClosePointInTimeFunc(tp elastictransport.Interface) NewClosePointInTime // However, keeping points in time has a cost; close them as soon as they are no // longer required for search requests. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time func New(tp elastictransport.Interface) *ClosePointInTime { r := &ClosePointInTime{ transport: tp, @@ -99,8 +97,6 @@ func New(tp elastictransport.Interface) *ClosePointInTime { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -386,9 +382,14 @@ func (r *ClosePointInTime) Pretty(pretty bool) *ClosePointInTime { return r } -// Id The ID of the point-in-time. +// The ID of the point-in-time. // API name: id func (r *ClosePointInTime) Id(id string) *ClosePointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = id return r diff --git a/typedapi/core/closepointintime/request.go b/typedapi/core/closepointintime/request.go index 3047a665cd..df82e6be3a 100644 --- a/typedapi/core/closepointintime/request.go +++ b/typedapi/core/closepointintime/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package closepointintime @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L48 type Request struct { // Id The ID of the point-in-time. diff --git a/typedapi/core/closepointintime/response.go b/typedapi/core/closepointintime/response.go index e55e9b2964..e1adc0ccd3 100644 --- a/typedapi/core/closepointintime/response.go +++ b/typedapi/core/closepointintime/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package closepointintime // Response holds the response body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L42 type Response struct { - NumFreed int `json:"num_freed"` + + // NumFreed The number of search contexts that were successfully closed. 
+ NumFreed int `json:"num_freed"` + // Succeeded If `true`, all search contexts associated with the point-in-time ID were + // successfully closed. Succeeded bool `json:"succeeded"` } diff --git a/typedapi/core/count/count.go b/typedapi/core/count/count.go index bd57ef57d1..c0f8199df9 100644 --- a/typedapi/core/count/count.go +++ b/typedapi/core/count/count.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns number of documents matching a query. +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. package count import ( @@ -81,9 +95,23 @@ func NewCountFunc(tp elastictransport.Interface) NewCount { } } -// Returns number of documents matching a query. +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. 
You can run a single count API +// search across multiple data streams and indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count func New(tp elastictransport.Interface) *Count { r := &Count{ transport: tp, @@ -91,8 +119,6 @@ func New(tp elastictransport.Interface) *Count { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -309,8 +335,8 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -324,6 +350,8 @@ func (r *Count) Index(index string) *Count { // AllowNoIndices If `false`, the request returns an error if any wildcard expression, index // alias, or `_all` value targets only missing or closed indices. // This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices func (r *Count) AllowNoIndices(allownoindices bool) *Count { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -331,8 +359,8 @@ func (r *Count) AllowNoIndices(allownoindices bool) *Count { return r } -// Analyzer Analyzer to use for the query string. 
-// This parameter can only be used when the `q` query string parameter is +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyzer func (r *Count) Analyzer(analyzer string) *Count { @@ -342,7 +370,7 @@ func (r *Count) Analyzer(analyzer string) *Count { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyze_wildcard func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { @@ -352,7 +380,7 @@ func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { } // DefaultOperator The default operator for query string query: `AND` or `OR`. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: default_operator func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { @@ -361,8 +389,9 @@ func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { return r } -// Df Field to use as default where no field prefix is given in the query string. -// This parameter can only be used when the `q` query string parameter is +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: df func (r *Count) Df(df string) *Count { @@ -371,10 +400,10 @@ func (r *Count) Df(df string) *Count { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. 
-// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Count { tmp := []string{} @@ -386,7 +415,7 @@ func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard return r } -// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// IgnoreThrottled If `true`, concrete, expanded, or aliased indices are ignored when frozen. // API name: ignore_throttled func (r *Count) IgnoreThrottled(ignorethrottled bool) *Count { r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) @@ -405,6 +434,8 @@ func (r *Count) IgnoreUnavailable(ignoreunavailable bool) *Count { // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *Count) Lenient(lenient bool) *Count { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -412,8 +443,8 @@ func (r *Count) Lenient(lenient bool) *Count { return r } -// MinScore Sets the minimum `_score` value that documents must have to be included in -// the result. +// MinScore The minimum `_score` value that documents must have to be included in the +// result. // API name: min_score func (r *Count) MinScore(minscore string) *Count { r.values.Set("min_score", minscore) @@ -421,8 +452,8 @@ func (r *Count) MinScore(minscore string) *Count { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, it is random. 
// API name: preference func (r *Count) Preference(preference string) *Count { r.values.Set("preference", preference) @@ -430,7 +461,7 @@ func (r *Count) Preference(preference string) *Count { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Count) Routing(routing string) *Count { r.values.Set("routing", routing) @@ -438,9 +469,15 @@ func (r *Count) Routing(routing string) *Count { return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. // API name: terminate_after func (r *Count) TerminateAfter(terminateafter string) *Count { r.values.Set("terminate_after", terminateafter) @@ -448,7 +485,8 @@ func (r *Count) TerminateAfter(terminateafter string) *Count { return r } -// Q Query in the Lucene query string syntax. +// Q The query in Lucene query string syntax. This parameter cannot be used with a +// request body. // API name: q func (r *Count) Q(q string) *Count { r.values.Set("q", q) @@ -500,11 +538,16 @@ func (r *Count) Pretty(pretty bool) *Count { return r } -// Query Defines the search definition using the Query DSL. +// Defines the search query using Query DSL. A request body query cannot be used +// with the `q` query string parameter. 
// API name: query -func (r *Count) Query(query *types.Query) *Count { +func (r *Count) Query(query types.QueryVariant) *Count { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/core/count/request.go b/typedapi/core/count/request.go index 469c2e01c6..29ac40e279 100644 --- a/typedapi/core/count/request.go +++ b/typedapi/core/count/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package count @@ -29,10 +29,11 @@ import ( // Request holds the request body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/count/CountRequest.ts#L26-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/count/CountRequest.ts#L26-L154 type Request struct { - // Query Defines the search definition using the Query DSL. + // Query Defines the search query using Query DSL. A request body query cannot be used + // with the `q` query string parameter. Query *types.Query `json:"query,omitempty"` } diff --git a/typedapi/core/count/response.go b/typedapi/core/count/response.go index 8edfa55b59..dc78ec5f4d 100644 --- a/typedapi/core/count/response.go +++ b/typedapi/core/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/count/CountResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/count/CountResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/core/create/create.go b/typedapi/core/create/create.go index 7364159c12..84c40c35d7 100644 --- a/typedapi/core/create/create.go +++ b/typedapi/core/create/create.go @@ -16,13 +16,134 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it -// searchable. -// If the target is an index and the document already exists, the request -// updates the document and increments its version. +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. 
+// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. 
+// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). 
+// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. 
+// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. package create import ( @@ -39,6 +160,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -92,13 +214,134 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } } -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it -// searchable. -// If the target is an index and the document already exists, the request -// updates the document and increments its version. +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. 
+// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. 
+// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create func New(tp elastictransport.Interface) *Create { r := &Create{ transport: tp, @@ -106,8 +349,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -332,7 +573,9 @@ func (r *Create) Header(key, value string) *Create { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. +// To automatically generate a document ID, use the `POST //_doc/` +// request format. // API Name: id func (r *Create) _id(id string) *Create { r.paramSet |= idMask @@ -341,11 +584,11 @@ func (r *Create) _id(id string) *Create { return r } -// Index Name of the data stream or index to target. -// If the target doesn’t exist and matches the name or wildcard (`*`) pattern of +// Index The name of the data stream or index to target. +// If the target doesn't exist and matches the name or wildcard (`*`) pattern of // an index template with a `data_stream` definition, this request creates the // data stream. -// If the target doesn’t exist and doesn’t match a data stream template, this +// If the target doesn't exist and doesn’t match a data stream template, this // request creates the index. // API Name: index func (r *Create) _index(index string) *Create { @@ -355,10 +598,50 @@ func (r *Create) _index(index string) *Create { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. -// If the index has a default ingest pipeline specified, then setting the value -// to `_none` disables the default ingest pipeline for this request. -// If a final pipeline is configured it will always run, regardless of the value +// IfPrimaryTerm Only perform the operation if the document has this primary term. 
+// API name: if_primary_term
+func (r *Create) IfPrimaryTerm(ifprimaryterm string) *Create {
+	r.values.Set("if_primary_term", ifprimaryterm)
+
+	return r
+}
+
+// IfSeqNo Only perform the operation if the document has this sequence number.
+// API name: if_seq_no
+func (r *Create) IfSeqNo(sequencenumber string) *Create {
+	r.values.Set("if_seq_no", sequencenumber)
+
+	return r
+}
+
+// IncludeSourceOnError True or false if to include the document source in the error message in case
+// of parsing errors.
+// API name: include_source_on_error
+func (r *Create) IncludeSourceOnError(includesourceonerror bool) *Create {
+	r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror))
+
+	return r
+}
+
+// OpType Set to `create` to only index the document if it does not already exist (put
+// if absent).
+// If a document with the specified `_id` already exists, the indexing operation
+// will fail.
+// The behavior is the same as using the `/_create` endpoint.
+// If a document ID is specified, this parameter defaults to `index`.
+// Otherwise, it defaults to `create`.
+// If the request targets a data stream, an `op_type` of `create` is required.
+// API name: op_type
+func (r *Create) OpType(optype optype.OpType) *Create {
+	r.values.Set("op_type", optype.String())
+
+	return r
+}
+
+// Pipeline The ID of the pipeline to use to preprocess incoming documents.
+// If the index has a default ingest pipeline specified, setting the value to
+// `_none` turns off the default ingest pipeline for this request.
+// If a final pipeline is configured, it will always run regardless of the value
 // of this parameter.
// API name: pipeline func (r *Create) Pipeline(pipeline string) *Create { @@ -368,9 +651,10 @@ func (r *Create) Pipeline(pipeline string) *Create { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Create) Refresh(refresh refresh.Refresh) *Create { r.values.Set("refresh", refresh.String()) @@ -378,7 +662,24 @@ func (r *Create) Refresh(refresh refresh.Refresh) *Create { return r } -// Routing Custom value used to route operations to a specific shard. +// RequireAlias If `true`, the destination must be an index alias. +// API name: require_alias +func (r *Create) RequireAlias(requirealias bool) *Create { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Create) RequireDataStream(requiredatastream bool) *Create { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Create) Routing(routing string) *Create { r.values.Set("routing", routing) @@ -386,8 +687,18 @@ func (r *Create) Routing(routing string) *Create { return r } -// Timeout Period the request waits for the following operations: automatic index +// Timeout The period the request waits for the following operations: automatic index // creation, dynamic mapping updates, waiting for active shards. 
+// Elasticsearch waits for at least the specified timeout period before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the operation might not be available when the operation runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a gateway or undergoing relocation. +// By default, the operation will wait on the primary shard to become available +// for at least 1 minute before failing and responding with an error. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Create) Timeout(duration string) *Create { r.values.Set("timeout", duration) @@ -395,9 +706,8 @@ func (r *Create) Timeout(duration string) *Create { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version The explicit version number for concurrency control. +// It must be a non-negative long number. // API name: version func (r *Create) Version(versionnumber string) *Create { r.values.Set("version", versionnumber) @@ -405,7 +715,7 @@ func (r *Create) Version(versionnumber string) *Create { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { r.values.Set("version_type", versiontype.String()) @@ -415,8 +725,9 @@ func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). 
+// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/create/request.go b/typedapi/core/create/request.go index 4765aad829..7c9af76bdf 100644 --- a/typedapi/core/create/request.go +++ b/typedapi/core/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/create/CreateRequest.ts#L32-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/create/CreateRequest.ts#L35-L221 type Request = json.RawMessage // NewRequest returns a Request diff --git a/typedapi/core/create/response.go b/typedapi/core/create/response.go index b8ca6895a4..13a95e9f3a 100644 --- a/typedapi/core/create/response.go +++ b/typedapi/core/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/create/CreateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/create/CreateResponse.ts#L22-L24 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/delete/delete.go b/typedapi/core/delete/delete.go index e676a0f13c..c777e7172e 100644 --- a/typedapi/core/delete/delete.go +++ b/typedapi/core/delete/delete.go @@ -16,10 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a document. -// Removes a JSON document from the specified index. +// +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. 
+// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. +// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. package delete import ( @@ -86,9 +137,60 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } // Delete a document. -// Removes a JSON document from the specified index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. 
+// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. +// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -340,7 +442,7 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. // API Name: id func (r *Delete) _id(id string) *Delete { r.paramSet |= idMask @@ -349,7 +451,7 @@ func (r *Delete) _id(id string) *Delete { return r } -// Index Name of the target index. +// Index The name of the target index. 
// API Name: index func (r *Delete) _index(index string) *Delete { r.paramSet |= indexMask @@ -375,9 +477,10 @@ func (r *Delete) IfSeqNo(sequencenumber string) *Delete { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Delete) Refresh(refresh refresh.Refresh) *Delete { r.values.Set("refresh", refresh.String()) @@ -385,7 +488,7 @@ func (r *Delete) Refresh(refresh refresh.Refresh) *Delete { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Delete) Routing(routing string) *Delete { r.values.Set("routing", routing) @@ -393,7 +496,15 @@ func (r *Delete) Routing(routing string) *Delete { return r } -// Timeout Period to wait for active shards. +// Timeout The period to wait for active shards. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the delete operation might not be available when the delete operation +// runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a store or undergoing relocation. +// By default, the delete operation will wait on the primary shard to become +// available for up to 1 minute before failing and responding with an error. // API name: timeout func (r *Delete) Timeout(duration string) *Delete { r.values.Set("timeout", duration) @@ -401,9 +512,8 @@ func (r *Delete) Timeout(duration string) *Delete { return r } -// Version Explicit version number for concurrency control. 
-// The specified version must match the current version of the document for the -// request to succeed. +// Version An explicit version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *Delete) Version(versionnumber string) *Delete { r.values.Set("version", versionnumber) @@ -411,7 +521,7 @@ func (r *Delete) Version(versionnumber string) *Delete { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { r.values.Set("version_type", versiontype.String()) @@ -419,10 +529,11 @@ func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { return r } -// WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// WaitForActiveShards The minimum number of shard copies that must be active before proceeding with +// the operation. +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Delete) WaitForActiveShards(waitforactiveshards string) *Delete { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/delete/response.go b/typedapi/core/delete/response.go index 579e0cda6d..6c3b6523da 100644 --- a/typedapi/core/delete/response.go +++ b/typedapi/core/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/delete/DeleteResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/delete/DeleteResponse.ts#L22-L34 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/deletebyquery/delete_by_query.go b/typedapi/core/deletebyquery/delete_by_query.go index aba274ea93..d53e6c7b10 100644 --- a/typedapi/core/deletebyquery/delete_by_query.go +++ b/typedapi/core/deletebyquery/delete_by_query.go @@ -16,10 +16,135 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete documents. +// // Deletes documents that match the specified query. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `delete` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// When you submit a delete by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and deletes +// matching documents using internal versioning. +// If a document changes between the time that the snapshot is taken and the +// delete operation is processed, it results in a version conflict and the +// delete operation fails. +// +// NOTE: Documents with a version equal to 0 cannot be deleted using delete by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing a delete by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents to delete. +// A bulk delete request is performed for each batch of matching documents. +// If a search or bulk request is rejected, the requests are retried up to 10 +// times, with exponential back off. 
+// If the maximum retry limit is reached, processing halts and all failed +// requests are returned in the response. +// Any delete requests that completed successfully still stick, they are not +// rolled back. +// +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts the operation could attempt +// to delete more documents from the source than `max_docs` until it has +// successfully deleted `max_docs documents`, or it has gone through every +// document in the source query. +// +// **Throttling delete requests** +// +// To control the rate at which delete by query issues batches of delete +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to disable throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single `_bulk` request, large batch sizes +// cause Elasticsearch to create many requests and wait before starting the next +// set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Delete by query supports sliced scroll to parallelize the delete process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to +// use. 
+// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// Adding slices to the delete by query operation creates sub-requests which +// means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with slices only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. 
Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. package deletebyquery import ( @@ -87,9 +212,134 @@ func NewDeleteByQueryFunc(tp elastictransport.Interface) NewDeleteByQuery { } // Delete documents. +// // Deletes documents that match the specified query. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `delete` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// When you submit a delete by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and deletes +// matching documents using internal versioning. +// If a document changes between the time that the snapshot is taken and the +// delete operation is processed, it results in a version conflict and the +// delete operation fails. 
+// +// NOTE: Documents with a version equal to 0 cannot be deleted using delete by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing a delete by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents to delete. +// A bulk delete request is performed for each batch of matching documents. +// If a search or bulk request is rejected, the requests are retried up to 10 +// times, with exponential back off. +// If the maximum retry limit is reached, processing halts and all failed +// requests are returned in the response. +// Any delete requests that completed successfully still stick, they are not +// rolled back. +// +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts the operation could attempt +// to delete more documents from the source than `max_docs` until it has +// successfully deleted `max_docs documents`, or it has gone through every +// document in the source query. +// +// **Throttling delete requests** +// +// To control the rate at which delete by query issues batches of delete +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to disable throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. 
+// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single `_bulk` request, large batch sizes +// cause Elasticsearch to create many requests and wait before starting the next +// set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Delete by query supports sliced scroll to parallelize the delete process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to +// use. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// Adding slices to the delete by query operation creates sub-requests which +// means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with slices only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. 
+// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query func New(tp elastictransport.Interface) *DeleteByQuery { r := &DeleteByQuery{ transport: tp, @@ -97,8 +347,6 @@ func New(tp elastictransport.Interface) *DeleteByQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,8 +558,8 @@ func (r *DeleteByQuery) Header(key, value string) *DeleteByQuery { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams or indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -335,6 +583,8 @@ func (r *DeleteByQuery) AllowNoIndices(allownoindices bool) *DeleteByQuery { } // Analyzer Analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyzer func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { r.values.Set("analyzer", analyzer) @@ -343,6 +593,8 @@ func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyze_wildcard func (r *DeleteByQuery) AnalyzeWildcard(analyzewildcard bool) *DeleteByQuery { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -359,6 +611,8 @@ func (r *DeleteByQuery) Conflicts(conflicts conflicts.Conflicts) *DeleteByQuery } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. 
// API name: default_operator func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *DeleteByQuery { r.values.Set("default_operator", defaultoperator.String()) @@ -366,7 +620,10 @@ func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *Dele return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: df func (r *DeleteByQuery) Df(df string) *DeleteByQuery { r.values.Set("df", df) @@ -374,11 +631,10 @@ func (r *DeleteByQuery) Df(df string) *DeleteByQuery { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. Valid values are: -// `all`, `open`, `closed`, `hidden`, `none`. +// It supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards func (r *DeleteByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteByQuery { tmp := []string{} @@ -409,6 +665,8 @@ func (r *DeleteByQuery) IgnoreUnavailable(ignoreunavailable bool) *DeleteByQuery // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -416,8 +674,8 @@ func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { return r } -// Preference Specifies the node or shard the operation should be performed on. 
-// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery { r.values.Set("preference", preference) @@ -427,6 +685,9 @@ func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery { // Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query // after the request completes. +// This is different than the delete API's `refresh` parameter, which causes +// just the shard that received the delete request to be refreshed. +// Unlike the delete API, it does not support `wait_for`. // API name: refresh func (r *DeleteByQuery) Refresh(refresh bool) *DeleteByQuery { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -451,7 +712,7 @@ func (r *DeleteByQuery) RequestsPerSecond(requestspersecond string) *DeleteByQue return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery { r.values.Set("routing", routing) @@ -459,7 +720,7 @@ func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery { return r } -// Q Query in the Lucene query string syntax. +// Q A query in the Lucene query string syntax. // API name: q func (r *DeleteByQuery) Q(q string) *DeleteByQuery { r.values.Set("q", q) @@ -467,7 +728,7 @@ func (r *DeleteByQuery) Q(q string) *DeleteByQuery { return r } -// Scroll Period to retain the search context for scrolling. +// Scroll The period to retain the search context for scrolling. // API name: scroll func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery { r.values.Set("scroll", duration) @@ -475,7 +736,7 @@ func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery { return r } -// ScrollSize Size of the scroll request that powers the operation. 
+// ScrollSize The size of the scroll request that powers the operation. // API name: scroll_size func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery { r.values.Set("scroll_size", scrollsize) @@ -483,8 +744,8 @@ func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery { return r } -// SearchTimeout Explicit timeout for each search request. -// Defaults to no timeout. +// SearchTimeout The explicit timeout for each search request. +// It defaults to no timeout. // API name: search_timeout func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery { r.values.Set("search_timeout", duration) @@ -493,7 +754,7 @@ func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery { } // SearchType The type of the search operation. -// Available options: `query_then_fetch`, `dfs_query_then_fetch`. +// Available options include `query_then_fetch` and `dfs_query_then_fetch`. // API name: search_type func (r *DeleteByQuery) SearchType(searchtype searchtype.SearchType) *DeleteByQuery { r.values.Set("search_type", searchtype.String()) @@ -509,7 +770,7 @@ func (r *DeleteByQuery) Slices(slices string) *DeleteByQuery { return r } -// Sort A comma-separated list of : pairs. +// Sort A comma-separated list of `:` pairs. // API name: sort func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery { tmp := []string{} @@ -521,7 +782,7 @@ func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery { return r } -// Stats Specific `tag` of the request for logging and statistical purposes. +// Stats The specific `tag` of the request for logging and statistical purposes. // API name: stats func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery { tmp := []string{} @@ -533,9 +794,10 @@ func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery { return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// TerminateAfter The maximum number of documents to collect for each shard. 
// If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. +// // Use with caution. // Elasticsearch applies this parameter to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. @@ -548,7 +810,7 @@ func (r *DeleteByQuery) TerminateAfter(terminateafter string) *DeleteByQuery { return r } -// Timeout Period each deletion request waits for active shards. +// Timeout The period each deletion request waits for active shards. // API name: timeout func (r *DeleteByQuery) Timeout(duration string) *DeleteByQuery { r.values.Set("timeout", duration) @@ -566,8 +828,10 @@ func (r *DeleteByQuery) Version(version bool) *DeleteByQuery { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the +// Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The `timeout` value controls how long each write request waits for +// unavailable shards to become available. // API name: wait_for_active_shards func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteByQuery { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -576,6 +840,11 @@ func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteB } // WaitForCompletion If `true`, the request blocks until the operation is complete. +// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task you can use to cancel or get the status of the +// task. Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. When you are done with a task, you should delete the +// task document so Elasticsearch can reclaim the space. 
// API name: wait_for_completion func (r *DeleteByQuery) WaitForCompletion(waitforcompletion bool) *DeleteByQuery { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -627,30 +896,42 @@ func (r *DeleteByQuery) Pretty(pretty bool) *DeleteByQuery { return r } -// MaxDocs The maximum number of documents to delete. +// The maximum number of documents to delete. // API name: max_docs func (r *DeleteByQuery) MaxDocs(maxdocs int64) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Query Specifies the documents to delete using the Query DSL. +// The documents to delete specified with Query DSL. // API name: query -func (r *DeleteByQuery) Query(query *types.Query) *DeleteByQuery { +func (r *DeleteByQuery) Query(query types.QueryVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Slice Slice the request manually using the provided slice ID and total number of +// Slice the request manually using the provided slice ID and total number of // slices. // API name: slice -func (r *DeleteByQuery) Slice(slice *types.SlicedScroll) *DeleteByQuery { +func (r *DeleteByQuery) Slice(slice types.SlicedScrollVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } diff --git a/typedapi/core/deletebyquery/request.go b/typedapi/core/deletebyquery/request.go index 6ebc7f978f..7ca9698dff 100644 --- a/typedapi/core/deletebyquery/request.go +++ b/typedapi/core/deletebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletebyquery @@ -29,12 +29,12 @@ import ( // Request holds the request body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L211 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L310 type Request struct { // MaxDocs The maximum number of documents to delete. MaxDocs *int64 `json:"max_docs,omitempty"` - // Query Specifies the documents to delete using the Query DSL. + // Query The documents to delete specified with Query DSL. Query *types.Query `json:"query,omitempty"` // Slice Slice the request manually using the provided slice ID and total number of // slices. diff --git a/typedapi/core/deletebyquery/response.go b/typedapi/core/deletebyquery/response.go index 7fc170ba8f..78dcfbcf16 100644 --- a/typedapi/core/deletebyquery/response.go +++ b/typedapi/core/deletebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletebyquery @@ -26,24 +26,53 @@ import ( // Response holds the response body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L88 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - SliceId *int `json:"slice_id,omitempty"` - Task types.TaskId `json:"task,omitempty"` - Throttled types.Duration `json:"throttled,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntil types.Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses pulled back by the delete by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures An array of failures if there were any unrecoverable errors during the + // process. + // If this array is not empty, the request ended abnormally because of those + // failures. 
+ // Delete by query is implemented using batches and any failures cause the + // entire process to end but all failures in the current batch are collected + // into the array. + // You can use the `conflicts` option to prevent reindex from ending on version + // conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops This field is always equal to zero for delete by query. + // It exists only so that delete by query, update by query, and reindex APIs + // return responses with the same structure. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the delete by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by delete by query. + // `bulk` is the number of bulk actions retried. + // `search` is the number of search actions retried. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task types.TaskId `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a `_delete_by_query` response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If `true`, some requests run during the delete by query operation timed out. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. 
+ Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // VersionConflicts The number of version conflicts that the delete by query hit. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go b/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go index f8aa898f77..b33c656923 100644 --- a/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go +++ b/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Throttle a delete by query operation. // @@ -90,7 +90,7 @@ func NewDeleteByQueryRethrottleFunc(tp elastictransport.Interface) NewDeleteByQu // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle func New(tp elastictransport.Interface) *DeleteByQueryRethrottle { r := &DeleteByQueryRethrottle{ transport: tp, @@ -312,6 +312,7 @@ func (r *DeleteByQueryRethrottle) _taskid(taskid string) *DeleteByQueryRethrottl } // RequestsPerSecond The throttle for this request in sub-requests per second. +// To disable throttling, set it to `-1`. 
// API name: requests_per_second func (r *DeleteByQueryRethrottle) RequestsPerSecond(requestspersecond string) *DeleteByQueryRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/core/deletebyqueryrethrottle/response.go b/typedapi/core/deletebyqueryrethrottle/response.go index 4ddc48483e..6b6b2fb84e 100644 --- a/typedapi/core/deletebyqueryrethrottle/response.go +++ b/typedapi/core/deletebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletebyqueryrethrottle @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package deletebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/core/deletescript/delete_script.go b/typedapi/core/deletescript/delete_script.go index cee7b9baca..37be4be033 100644 --- a/typedapi/core/deletescript/delete_script.go +++ b/typedapi/core/deletescript/delete_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a script or search template. // Deletes a stored script or search template. @@ -80,7 +80,7 @@ func NewDeleteScriptFunc(tp elastictransport.Interface) NewDeleteScript { // Delete a script or search template. // Deletes a stored script or search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script func New(tp elastictransport.Interface) *DeleteScript { r := &DeleteScript{ transport: tp, @@ -290,7 +290,7 @@ func (r *DeleteScript) Header(key, value string) *DeleteScript { return r } -// Id Identifier for the stored script or search template. +// Id The identifier for the stored script or search template. // API Name: id func (r *DeleteScript) _id(id string) *DeleteScript { r.paramSet |= idMask @@ -299,9 +299,10 @@ func (r *DeleteScript) _id(id string) *DeleteScript { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { r.values.Set("master_timeout", duration) @@ -309,9 +310,10 @@ func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. 
// API name: timeout func (r *DeleteScript) Timeout(duration string) *DeleteScript { r.values.Set("timeout", duration) diff --git a/typedapi/core/deletescript/response.go b/typedapi/core/deletescript/response.go index 6882b0fe54..6158c58666 100644 --- a/typedapi/core/deletescript/response.go +++ b/typedapi/core/deletescript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletescript // Response holds the response body struct for the package deletescript // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/core/exists/exists.go b/typedapi/core/exists/exists.go index bd7f56801f..16480b1d11 100644 --- a/typedapi/core/exists/exists.go +++ b/typedapi/core/exists/exists.go @@ -16,10 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check a document. -// Checks if a specified document exists. +// +// Verify that a document exists. 
+// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. package exists import ( @@ -82,9 +103,30 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { } // Check a document. -// Checks if a specified document exists. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// Verify that a document exists. +// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get func New(tp elastictransport.Interface) *Exists { r := &Exists{ transport: tp, @@ -251,7 +293,7 @@ func (r *Exists) Header(key, value string) *Exists { return r } -// Id Identifier of the document. +// Id A unique document identifier. // API Name: id func (r *Exists) _id(id string) *Exists { r.paramSet |= idMask @@ -260,8 +302,8 @@ func (r *Exists) _id(id string) *Exists { return r } -// Index Comma-separated list of data streams, indices, and aliases. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). // API Name: index func (r *Exists) _index(index string) *Exists { r.paramSet |= indexMask @@ -270,8 +312,16 @@ func (r *Exists) _index(index string) *Exists { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// +// If it is set to `_local`, the operation will prefer to be run on a local +// allocated shard when possible. +// If it is set to a custom value, the value is used to guarantee that the same +// shards will be used for the same custom value. +// This can help with "jumping values" when hitting different shards in +// different refresh states. +// A sample value can be something like the web session ID or the user name. // API name: preference func (r *Exists) Preference(preference string) *Exists { r.values.Set("preference", preference) @@ -287,8 +337,10 @@ func (r *Exists) Realtime(realtime bool) *Exists { return r } -// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query -// after the request completes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. 
+// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *Exists) Refresh(refresh bool) *Exists { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -296,7 +348,7 @@ func (r *Exists) Refresh(refresh bool) *Exists { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Exists) Routing(routing string) *Exists { r.values.Set("routing", routing) @@ -304,8 +356,8 @@ func (r *Exists) Routing(routing string) *Exists { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *Exists) Source_(sourceconfigparam string) *Exists { r.values.Set("_source", sourceconfigparam) @@ -313,7 +365,10 @@ func (r *Exists) Source_(sourceconfigparam string) *Exists { return r } -// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Exists) SourceExcludes_(fields ...string) *Exists { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -322,6 +377,10 @@ func (r *Exists) SourceExcludes_(fields ...string) *Exists { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. 
+// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Exists) SourceIncludes_(fields ...string) *Exists { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -329,9 +388,9 @@ func (r *Exists) SourceIncludes_(fields ...string) *Exists { return r } -// StoredFields List of stored fields to return as part of a hit. +// StoredFields A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. -// If this field is specified, the `_source` parameter defaults to false. +// If this field is specified, the `_source` parameter defaults to `false`. // API name: stored_fields func (r *Exists) StoredFields(fields ...string) *Exists { r.values.Set("stored_fields", strings.Join(fields, ",")) @@ -349,7 +408,7 @@ func (r *Exists) Version(versionnumber string) *Exists { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Exists) VersionType(versiontype versiontype.VersionType) *Exists { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/existssource/exists_source.go b/typedapi/core/existssource/exists_source.go index 6310bd1864..e027f0f4c1 100644 --- a/typedapi/core/existssource/exists_source.go +++ b/typedapi/core/existssource/exists_source.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check for a document source. -// Checks if a document's `_source` is stored. +// +// Check whether a document source exists in an index. 
+// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. package existssource import ( @@ -82,9 +90,17 @@ func NewExistsSourceFunc(tp elastictransport.Interface) NewExistsSource { } // Check for a document source. -// Checks if a document's `_source` is stored. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// Check whether a document source exists in an index. +// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get func New(tp elastictransport.Interface) *ExistsSource { r := &ExistsSource{ transport: tp, @@ -251,7 +267,7 @@ func (r *ExistsSource) Header(key, value string) *ExistsSource { return r } -// Id Identifier of the document. +// Id A unique identifier for the document. // API Name: id func (r *ExistsSource) _id(id string) *ExistsSource { r.paramSet |= idMask @@ -260,8 +276,8 @@ func (r *ExistsSource) _id(id string) *ExistsSource { return r } -// Index Comma-separated list of data streams, indices, and aliases. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). // API Name: index func (r *ExistsSource) _index(index string) *ExistsSource { r.paramSet |= indexMask @@ -270,8 +286,8 @@ func (r *ExistsSource) _index(index string) *ExistsSource { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. 
// API name: preference func (r *ExistsSource) Preference(preference string) *ExistsSource { r.values.Set("preference", preference) @@ -279,7 +295,7 @@ func (r *ExistsSource) Preference(preference string) *ExistsSource { return r } -// Realtime If true, the request is real-time as opposed to near-real-time. +// Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { r.values.Set("realtime", strconv.FormatBool(realtime)) @@ -287,8 +303,10 @@ func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { return r } -// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query -// after the request completes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -296,7 +314,7 @@ func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *ExistsSource) Routing(routing string) *ExistsSource { r.values.Set("routing", routing) @@ -304,8 +322,8 @@ func (r *ExistsSource) Routing(routing string) *ExistsSource { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. 
// API name: _source func (r *ExistsSource) Source_(sourceconfigparam string) *ExistsSource { r.values.Set("_source", sourceconfigparam) @@ -329,9 +347,8 @@ func (r *ExistsSource) SourceIncludes_(fields ...string) *ExistsSource { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *ExistsSource) Version(versionnumber string) *ExistsSource { r.values.Set("version", versionnumber) @@ -339,7 +356,7 @@ func (r *ExistsSource) Version(versionnumber string) *ExistsSource { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *ExistsSource) VersionType(versiontype versiontype.VersionType) *ExistsSource { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/explain/explain.go b/typedapi/core/explain/explain.go index 375105e3dc..0c4999e707 100644 --- a/typedapi/core/explain/explain.go +++ b/typedapi/core/explain/explain.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Explain a document match result. -// Returns information about why a specific document matches, or doesn’t match, -// a query. +// Get information about why a specific document matches, or doesn't match, a +// query. +// It computes a score explanation for a query and a specific document. package explain import ( @@ -90,10 +91,11 @@ func NewExplainFunc(tp elastictransport.Interface) NewExplain { } // Explain a document match result. 
-// Returns information about why a specific document matches, or doesn’t match, -// a query. +// Get information about why a specific document matches, or doesn't match, a +// query. +// It computes a score explanation for a query and a specific document. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain func New(tp elastictransport.Interface) *Explain { r := &Explain{ transport: tp, @@ -101,8 +103,6 @@ func New(tp elastictransport.Interface) *Explain { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -320,7 +320,7 @@ func (r *Explain) Header(key, value string) *Explain { return r } -// Id Defines the document ID. +// Id The document identifier. // API Name: id func (r *Explain) _id(id string) *Explain { r.paramSet |= idMask @@ -329,7 +329,7 @@ func (r *Explain) _id(id string) *Explain { return r } -// Index Index names used to limit the request. +// Index Index names that are used to limit the request. // Only a single index name can be provided to this parameter. // API Name: index func (r *Explain) _index(index string) *Explain { @@ -339,8 +339,8 @@ func (r *Explain) _index(index string) *Explain { return r } -// Analyzer Analyzer to use for the query string. -// This parameter can only be used when the `q` query string parameter is +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyzer func (r *Explain) Analyzer(analyzer string) *Explain { @@ -350,6 +350,8 @@ func (r *Explain) Analyzer(analyzer string) *Explain { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. 
// API name: analyze_wildcard func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -358,6 +360,8 @@ func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain { } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: default_operator func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain { r.values.Set("default_operator", defaultoperator.String()) @@ -365,7 +369,10 @@ func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain { return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: df func (r *Explain) Df(df string) *Explain { r.values.Set("df", df) @@ -375,6 +382,8 @@ func (r *Explain) Df(df string) *Explain { // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *Explain) Lenient(lenient bool) *Explain { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -382,8 +391,8 @@ func (r *Explain) Lenient(lenient bool) *Explain { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. 
// API name: preference func (r *Explain) Preference(preference string) *Explain { r.values.Set("preference", preference) @@ -391,7 +400,7 @@ func (r *Explain) Preference(preference string) *Explain { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Explain) Routing(routing string) *Explain { r.values.Set("routing", routing) @@ -399,7 +408,7 @@ func (r *Explain) Routing(routing string) *Explain { return r } -// Source_ True or false to return the `_source` field or not, or a list of fields to +// Source_ `True` or `false` to return the `_source` field or not or a list of fields to // return. // API name: _source func (r *Explain) Source_(sourceconfigparam string) *Explain { @@ -409,6 +418,9 @@ func (r *Explain) Source_(sourceconfigparam string) *Explain { } // SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Explain) SourceExcludes_(fields ...string) *Explain { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -417,6 +429,10 @@ func (r *Explain) SourceExcludes_(fields ...string) *Explain { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. 
// API name: _source_includes func (r *Explain) SourceIncludes_(fields ...string) *Explain { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -432,7 +448,7 @@ func (r *Explain) StoredFields(fields ...string) *Explain { return r } -// Q Query in the Lucene query string syntax. +// Q The query in the Lucene query string syntax. // API name: q func (r *Explain) Q(q string) *Explain { r.values.Set("q", q) @@ -484,11 +500,15 @@ func (r *Explain) Pretty(pretty bool) *Explain { return r } -// Query Defines the search definition using the Query DSL. +// Defines the search definition using the Query DSL. // API name: query -func (r *Explain) Query(query *types.Query) *Explain { +func (r *Explain) Query(query types.QueryVariant) *Explain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/core/explain/request.go b/typedapi/core/explain/request.go index 11f51ae3ee..3faaed39af 100644 --- a/typedapi/core/explain/request.go +++ b/typedapi/core/explain/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explain @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/explain/ExplainRequest.ts#L26-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/explain/ExplainRequest.ts#L26-L125 type Request struct { // Query Defines the search definition using the Query DSL. 
diff --git a/typedapi/core/explain/response.go b/typedapi/core/explain/response.go index de99af9d51..1cc2dc6169 100644 --- a/typedapi/core/explain/response.go +++ b/typedapi/core/explain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explain @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/explain/ExplainResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/explain/ExplainResponse.ts#L23-L31 type Response struct { Explanation *types.ExplanationDetail `json:"explanation,omitempty"` Get *types.InlineGet `json:"get,omitempty"` diff --git a/typedapi/core/fieldcaps/field_caps.go b/typedapi/core/fieldcaps/field_caps.go index d97e9706cd..8a268d7893 100644 --- a/typedapi/core/fieldcaps/field_caps.go +++ b/typedapi/core/fieldcaps/field_caps.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the field capabilities. // @@ -98,7 +98,7 @@ func NewFieldCapsFunc(tp elastictransport.Interface) NewFieldCaps { // For example, a runtime field with a type of keyword is returned the same as // any other field that belongs to the `keyword` family. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps func New(tp elastictransport.Interface) *FieldCaps { r := &FieldCaps{ transport: tp, @@ -106,8 +106,6 @@ func New(tp elastictransport.Interface) *FieldCaps { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -324,9 +322,9 @@ func (r *FieldCaps) Header(key, value string) *FieldCaps { return r } -// Index Comma-separated list of data streams, indices, and aliases used to limit the -// request. Supports wildcards (*). To target all data streams and indices, omit -// this parameter or use * or _all. +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (*). To target all data streams and indices, +// omit this parameter or use * or _all. // API Name: index func (r *FieldCaps) Index(index string) *FieldCaps { r.paramSet |= indexMask @@ -348,7 +346,7 @@ func (r *FieldCaps) AllowNoIndices(allownoindices bool) *FieldCaps { return r } -// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// ExpandWildcards The type of index that wildcard patterns can match. If the request can target // data streams, this argument determines whether wildcard expressions match // hidden data streams. Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards @@ -378,8 +376,7 @@ func (r *FieldCaps) IncludeUnmapped(includeunmapped bool) *FieldCaps { return r } -// Filters An optional set of filters: can include -// +metadata,-metadata,-nested,-multifield,-parent +// Filters A comma-separated list of filters to apply to the response. 
// API name: filters func (r *FieldCaps) Filters(filters string) *FieldCaps { r.values.Set("filters", filters) @@ -387,7 +384,10 @@ func (r *FieldCaps) Filters(filters string) *FieldCaps { return r } -// Types Only return results for fields that have one of the types in the list +// Types A comma-separated list of field types to include. +// Any fields that do not match one of these types will be excluded from the +// results. +// It defaults to empty, meaning that all field types are returned. // API name: types func (r *FieldCaps) Types(types ...string) *FieldCaps { tmp := []string{} @@ -451,32 +451,54 @@ func (r *FieldCaps) Pretty(pretty bool) *FieldCaps { return r } -// Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are +// A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are // supported. // API name: fields func (r *FieldCaps) Fields(fields ...string) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// IndexFilter Allows to filter indices if the provided query rewrites to match_none on -// every shard. +// Filter indices if the provided query rewrites to `match_none` on every shard. +// +// IMPORTANT: The filtering is done on a best-effort basis, it uses index +// statistics and mappings to rewrite queries to `match_none` instead of fully +// running the request. +// For instance a range query over a date field can rewrite to `match_none` if +// all documents within a shard (including deleted documents) are outside of the +// provided range. +// However, not all queries can rewrite to `match_none` so this API may return +// an index even if the provided filter matches no document. 
// API name: index_filter -func (r *FieldCaps) IndexFilter(indexfilter *types.Query) *FieldCaps { +func (r *FieldCaps) IndexFilter(indexfilter types.QueryVariant) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexFilter = indexfilter + r.req.IndexFilter = indexfilter.QueryCaster() return r } -// RuntimeMappings Defines ad-hoc runtime fields in the request similar to the way it is done in +// Define ad-hoc runtime fields in the request similar to the way it is done in // search requests. // These fields exist only as part of the query and take precedence over fields // defined with the same name in the index mappings. // API name: runtime_mappings -func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFields) *FieldCaps { - r.req.RuntimeMappings = runtimefields +func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } diff --git a/typedapi/core/fieldcaps/request.go b/typedapi/core/fieldcaps/request.go index eb82b3656f..8de6021098 100644 --- a/typedapi/core/fieldcaps/request.go +++ b/typedapi/core/fieldcaps/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package fieldcaps @@ -32,16 +32,24 @@ import ( // Request holds the request body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L111 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L130 type Request struct { - // Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are + // Fields A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are // supported. Fields []string `json:"fields,omitempty"` - // IndexFilter Allows to filter indices if the provided query rewrites to match_none on - // every shard. + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. + // + // IMPORTANT: The filtering is done on a best-effort basis, it uses index + // statistics and mappings to rewrite queries to `match_none` instead of fully + // running the request. + // For instance a range query over a date field can rewrite to `match_none` if + // all documents within a shard (including deleted documents) are outside of the + // provided range. + // However, not all queries can rewrite to `match_none` so this API may return + // an index even if the provided filter matches no document. IndexFilter *types.Query `json:"index_filter,omitempty"` - // RuntimeMappings Defines ad-hoc runtime fields in the request similar to the way it is done in + // RuntimeMappings Define ad-hoc runtime fields in the request similar to the way it is done in // search requests. 
// These fields exist only as part of the query and take precedence over fields // defined with the same name in the index mappings. diff --git a/typedapi/core/fieldcaps/response.go b/typedapi/core/fieldcaps/response.go index 097dcae114..7690004976 100644 --- a/typedapi/core/fieldcaps/response.go +++ b/typedapi/core/fieldcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package fieldcaps @@ -32,10 +32,12 @@ import ( // Response holds the response body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L38 type Response struct { - Fields map[string]map[string]types.FieldCapability `json:"fields"` - Indices []string `json:"indices"` + Fields map[string]map[string]types.FieldCapability `json:"fields"` + // Indices The list of indices where this field has the same type family, or null if all + // indices have the same type family for the field. + Indices []string `json:"indices"` } // NewResponse returns a Response diff --git a/typedapi/core/get/get.go b/typedapi/core/get/get.go index 247b4b6453..cc6dd44634 100644 --- a/typedapi/core/get/get.go +++ b/typedapi/core/get/get.go @@ -16,10 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a document by its ID. -// Retrieves the document with the specified ID from an index. +// +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). +// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. +// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. +// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. 
+// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. package get import ( @@ -85,9 +157,81 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get a document by its ID. -// Retrieves the document with the specified ID from an index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). +// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. 
+// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. +// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. +// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. 
+// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -339,7 +483,7 @@ func (r *Get) Header(key, value string) *Get { return r } -// Id Unique identifier of the document. +// Id A unique document identifier. // API Name: id func (r *Get) _id(id string) *Get { r.paramSet |= idMask @@ -348,7 +492,7 @@ func (r *Get) _id(id string) *Get { return r } -// Index Name of the index that contains the document. +// Index The name of the index that contains the document. // API Name: index func (r *Get) _index(index string) *Get { r.paramSet |= indexMask @@ -357,11 +501,11 @@ func (r *Get) _index(index string) *Get { return r } -// ForceSyntheticSource Should this request force synthetic _source? -// Use this to test if the mapping supports synthetic _source and to get a sense -// of the worst case performance. -// Fetches with this enabled will be slower the enabling synthetic source -// natively in the index. +// ForceSyntheticSource Indicates whether the request forces synthetic `_source`. +// Use this paramater to test if the mapping supports synthetic `_source` and to +// get a sense of the worst case performance. +// Fetches with this parameter enabled will be slower than enabling synthetic +// source natively in the index. // API name: force_synthetic_source func (r *Get) ForceSyntheticSource(forcesyntheticsource bool) *Get { r.values.Set("force_synthetic_source", strconv.FormatBool(forcesyntheticsource)) @@ -369,8 +513,16 @@ func (r *Get) ForceSyntheticSource(forcesyntheticsource bool) *Get { return r } -// Preference Specifies the node or shard the operation should be performed on. Random by -// default. 
+// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// +// If it is set to `_local`, the operation will prefer to be run on a local +// allocated shard when possible. +// If it is set to a custom value, the value is used to guarantee that the same +// shards will be used for the same custom value. +// This can help with "jumping values" when hitting different shards in +// different refresh states. +// A sample value can be something like the web session ID or the user name. // API name: preference func (r *Get) Preference(preference string) *Get { r.values.Set("preference", preference) @@ -386,8 +538,10 @@ func (r *Get) Realtime(realtime bool) *Get { return r } -// Refresh If true, Elasticsearch refreshes the affected shards to make this operation -// visible to search. If false, do nothing with refreshes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *Get) Refresh(refresh bool) *Get { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -395,7 +549,7 @@ func (r *Get) Refresh(refresh bool) *Get { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Get) Routing(routing string) *Get { r.values.Set("routing", routing) @@ -403,8 +557,8 @@ func (r *Get) Routing(routing string) *Get { return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. 
// API name: _source func (r *Get) Source_(sourceconfigparam string) *Get { r.values.Set("_source", sourceconfigparam) @@ -412,7 +566,10 @@ func (r *Get) Source_(sourceconfigparam string) *Get { return r } -// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Get) SourceExcludes_(fields ...string) *Get { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -421,6 +578,10 @@ func (r *Get) SourceExcludes_(fields ...string) *Get { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Get) SourceIncludes_(fields ...string) *Get { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -428,9 +589,11 @@ func (r *Get) SourceIncludes_(fields ...string) *Get { return r } -// StoredFields List of stored fields to return as part of a hit. +// StoredFields A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. -// If this field is specified, the `_source` parameter defaults to false. +// If this field is specified, the `_source` parameter defaults to `false`. +// Only leaf fields can be retrieved with the `stored_field` option. +// Object fields can't be returned;​if specified, the request fails. 
// API name: stored_fields func (r *Get) StoredFields(fields ...string) *Get { r.values.Set("stored_fields", strings.Join(fields, ",")) @@ -438,8 +601,8 @@ func (r *Get) StoredFields(fields ...string) *Get { return r } -// Version Explicit version number for concurrency control. The specified version must -// match the current version of the document for the request to succeed. +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *Get) Version(versionnumber string) *Get { r.values.Set("version", versionnumber) @@ -447,7 +610,7 @@ func (r *Get) Version(versionnumber string) *Get { return r } -// VersionType Specific version type: internal, external, external_gte. +// VersionType The version type. // API name: version_type func (r *Get) VersionType(versiontype versiontype.VersionType) *Get { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/get/response.go b/typedapi/core/get/response.go index 6ae6ffa055..88fef5c7e8 100644 --- a/typedapi/core/get/response.go +++ b/typedapi/core/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,18 +26,33 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get/GetResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get/GetResponse.ts#L23-L34 type Response struct { - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Found bool `json:"found"` - Id_ string `json:"_id"` - Ignored_ []string `json:"_ignored,omitempty"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Routing_ *string `json:"_routing,omitempty"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Source_ json.RawMessage `json:"_source,omitempty"` - Version_ *int64 `json:"_version,omitempty"` + + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Found Indicates whether the document exists. + Found bool `json:"found"` + // Id_ The unique identifier for the document. + Id_ string `json:"_id"` + Ignored_ []string `json:"_ignored,omitempty"` + // Index_ The name of the index the document belongs to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Routing_ The explicit routing, if set. + Routing_ *string `json:"_routing,omitempty"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. 
+ // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Source_ If `found` is `true`, it contains the document data formatted in JSON. + // If the `_source` parameter is set to `false` or the `stored_fields` parameter + // is set to `true`, it is excluded. + Source_ json.RawMessage `json:"_source,omitempty"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ *int64 `json:"_version,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/getscript/get_script.go b/typedapi/core/getscript/get_script.go index 97fda7c57c..e8ebcb35de 100644 --- a/typedapi/core/getscript/get_script.go +++ b/typedapi/core/getscript/get_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a script or search template. // Retrieves a stored script or search template. @@ -80,7 +80,7 @@ func NewGetScriptFunc(tp elastictransport.Interface) NewGetScript { // Get a script or search template. // Retrieves a stored script or search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script func New(tp elastictransport.Interface) *GetScript { r := &GetScript{ transport: tp, @@ -290,7 +290,7 @@ func (r *GetScript) Header(key, value string) *GetScript { return r } -// Id Identifier for the stored script or search template. +// Id The identifier for the stored script or search template. 
// API Name: id func (r *GetScript) _id(id string) *GetScript { r.paramSet |= idMask @@ -299,7 +299,10 @@ func (r *GetScript) _id(id string) *GetScript { return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *GetScript) MasterTimeout(duration string) *GetScript { r.values.Set("master_timeout", duration) diff --git a/typedapi/core/getscript/response.go b/typedapi/core/getscript/response.go index 7589aeef88..7ea9ad3724 100644 --- a/typedapi/core/getscript/response.go +++ b/typedapi/core/getscript/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getscript @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscript // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script/GetScriptResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script/GetScriptResponse.ts#L23-L29 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/core/getscriptcontext/get_script_context.go b/typedapi/core/getscriptcontext/get_script_context.go index 0bdfdb85c3..cc8a9c85a7 100644 --- a/typedapi/core/getscriptcontext/get_script_context.go +++ b/typedapi/core/getscriptcontext/get_script_context.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get script contexts. // @@ -74,7 +74,7 @@ func NewGetScriptContextFunc(tp elastictransport.Interface) NewGetScriptContext // // Get a list of supported script contexts and their methods. // -// https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context func New(tp elastictransport.Interface) *GetScriptContext { r := &GetScriptContext{ transport: tp, diff --git a/typedapi/core/getscriptcontext/response.go b/typedapi/core/getscriptcontext/response.go index c9d4c581c0..be74e3771a 100644 --- a/typedapi/core/getscriptcontext/response.go +++ b/typedapi/core/getscriptcontext/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getscriptcontext @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptcontext // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 type Response struct { Contexts []types.GetScriptContext `json:"contexts"` } diff --git a/typedapi/core/getscriptlanguages/get_script_languages.go b/typedapi/core/getscriptlanguages/get_script_languages.go index 78b41231d0..c979c57400 100644 --- a/typedapi/core/getscriptlanguages/get_script_languages.go +++ b/typedapi/core/getscriptlanguages/get_script_languages.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get script languages. // @@ -74,7 +74,7 @@ func NewGetScriptLanguagesFunc(tp elastictransport.Interface) NewGetScriptLangua // // Get a list of available script types, languages, and contexts. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages func New(tp elastictransport.Interface) *GetScriptLanguages { r := &GetScriptLanguages{ transport: tp, diff --git a/typedapi/core/getscriptlanguages/response.go b/typedapi/core/getscriptlanguages/response.go index ae13193895..892081879c 100644 --- a/typedapi/core/getscriptlanguages/response.go +++ b/typedapi/core/getscriptlanguages/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getscriptlanguages @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptlanguages // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 type Response struct { LanguageContexts []types.LanguageContext `json:"language_contexts"` TypesAllowed []string `json:"types_allowed"` diff --git a/typedapi/core/getsource/get_source.go b/typedapi/core/getsource/get_source.go index e7acdba510..41ea9ebe70 100644 --- a/typedapi/core/getsource/get_source.go +++ b/typedapi/core/getsource/get_source.go @@ -16,10 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a document's source. -// Returns the source of a document. +// +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` package getsource import ( @@ -84,9 +98,23 @@ func NewGetSourceFunc(tp elastictransport.Interface) NewGetSource { } // Get a document's source. -// Returns the source of a document. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get func New(tp elastictransport.Interface) *GetSource { r := &GetSource{ transport: tp, @@ -302,7 +330,7 @@ func (r *GetSource) Header(key, value string) *GetSource { return r } -// Id Unique identifier of the document. +// Id A unique document identifier. // API Name: id func (r *GetSource) _id(id string) *GetSource { r.paramSet |= idMask @@ -311,7 +339,7 @@ func (r *GetSource) _id(id string) *GetSource { return r } -// Index Name of the index that contains the document. +// Index The name of the index that contains the document. 
// API Name: index func (r *GetSource) _index(index string) *GetSource { r.paramSet |= indexMask @@ -320,8 +348,8 @@ func (r *GetSource) _index(index string) *GetSource { return r } -// Preference Specifies the node or shard the operation should be performed on. Random by -// default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. // API name: preference func (r *GetSource) Preference(preference string) *GetSource { r.values.Set("preference", preference) @@ -329,7 +357,7 @@ func (r *GetSource) Preference(preference string) *GetSource { return r } -// Realtime Boolean) If true, the request is real-time as opposed to near-real-time. +// Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime func (r *GetSource) Realtime(realtime bool) *GetSource { r.values.Set("realtime", strconv.FormatBool(realtime)) @@ -337,8 +365,10 @@ func (r *GetSource) Realtime(realtime bool) *GetSource { return r } -// Refresh If true, Elasticsearch refreshes the affected shards to make this operation -// visible to search. If false, do nothing with refreshes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *GetSource) Refresh(refresh bool) *GetSource { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -346,7 +376,7 @@ func (r *GetSource) Refresh(refresh bool) *GetSource { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. 
// API name: routing func (r *GetSource) Routing(routing string) *GetSource { r.values.Set("routing", routing) @@ -354,8 +384,8 @@ func (r *GetSource) Routing(routing string) *GetSource { return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *GetSource) Source_(sourceconfigparam string) *GetSource { r.values.Set("_source", sourceconfigparam) @@ -379,6 +409,7 @@ func (r *GetSource) SourceIncludes_(fields ...string) *GetSource { return r } +// StoredFields A comma-separated list of stored fields to return as part of a hit. // API name: stored_fields func (r *GetSource) StoredFields(fields ...string) *GetSource { r.values.Set("stored_fields", strings.Join(fields, ",")) @@ -386,8 +417,8 @@ func (r *GetSource) StoredFields(fields ...string) *GetSource { return r } -// Version Explicit version number for concurrency control. The specified version must -// match the current version of the document for the request to succeed. +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *GetSource) Version(versionnumber string) *GetSource { r.values.Set("version", versionnumber) @@ -395,7 +426,7 @@ func (r *GetSource) Version(versionnumber string) *GetSource { return r } -// VersionType Specific version type: internal, external, external_gte. +// VersionType The version type. // API name: version_type func (r *GetSource) VersionType(versiontype versiontype.VersionType) *GetSource { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/getsource/response.go b/typedapi/core/getsource/response.go index 9705b03d39..dfe6d6d98c 100644 --- a/typedapi/core/getsource/response.go +++ b/typedapi/core/getsource/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsource @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsource // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_source/SourceResponse.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_source/SourceResponse.ts#L20-L23 type Response = json.RawMessage diff --git a/typedapi/core/healthreport/health_report.go b/typedapi/core/healthreport/health_report.go index e3232450c8..f232c38428 100644 --- a/typedapi/core/healthreport/health_report.go +++ b/typedapi/core/healthreport/health_report.go @@ -16,9 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the health of the cluster. +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. 
+// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. +// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. +// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. package healthreport import ( @@ -74,9 +103,38 @@ func NewHealthReportFunc(tp elastictransport.Interface) NewHealthReport { } } -// Returns the health of the cluster. +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. 
+// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. +// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. +// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report func New(tp elastictransport.Interface) *HealthReport { r := &HealthReport{ transport: tp, diff --git a/typedapi/core/healthreport/response.go b/typedapi/core/healthreport/response.go index eca4db23fb..629510e751 100644 --- a/typedapi/core/healthreport/response.go +++ b/typedapi/core/healthreport/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package healthreport @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package healthreport // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/Response.ts#L22-L28 type Response struct { ClusterName string `json:"cluster_name"` Indicators types.Indicators `json:"indicators"` diff --git a/typedapi/core/index/index.go b/typedapi/core/index/index.go index d086d585bf..fc15f60d56 100644 --- a/typedapi/core/index/index.go +++ b/typedapi/core/index/index.go @@ -16,13 +16,207 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it +// Create or update a document in an index. +// +// Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. +// +// NOTE: You cannot use this API to send update requests for existing documents +// in a data stream. 
+// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add or overwrite a document using the `PUT //_doc/<_id>` request +// format, you must have the `create`, `index`, or `write` index privilege. +// * To add a document using the `POST //_doc/` request format, you must +// have the `create_doc`, `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// NOTE: Replica shards might not all be started when an indexing operation +// returns successfully. +// By default, only the primary is required. Set `wait_for_active_shards` to +// change this default behavior. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. 
+// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. 
+// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. 
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. +// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. 
+// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. +// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. +// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. 
+// Even the simple case of updating the Elasticsearch index using data from a +// database is simplified if external versioning is used, as only the latest +// version will be used if the index operations arrive out of order. package index import ( @@ -91,13 +285,207 @@ func NewIndexFunc(tp elastictransport.Interface) NewIndex { } } -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it +// Create or update a document in an index. +// +// Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html +// NOTE: You cannot use this API to send update requests for existing documents +// in a data stream. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add or overwrite a document using the `PUT //_doc/<_id>` request +// format, you must have the `create`, `index`, or `write` index privilege. +// * To add a document using the `POST //_doc/` request format, you must +// have the `create_doc`, `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// NOTE: Replica shards might not all be started when an indexing operation +// returns successfully. +// By default, only the primary is required. Set `wait_for_active_shards` to +// change this default behavior. 
+// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. 
+// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. 
+// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. 
+// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. +// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. +// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. +// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. 
For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. +// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. +// Even the simple case of updating the Elasticsearch index using data from a +// database is simplified if external versioning is used, as only the latest +// version will be used if the index operations arrive out of order. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create func New(tp elastictransport.Interface) *Index { r := &Index{ transport: tp, @@ -105,8 +493,6 @@ func New(tp elastictransport.Interface) *Index { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,7 +728,9 @@ func (r *Index) Header(key, value string) *Index { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. +// To automatically generate a document ID, use the `POST //_doc/` +// request format and omit this parameter. // API Name: id func (r *Index) Id(id string) *Index { r.paramSet |= idMask @@ -351,7 +739,13 @@ func (r *Index) Id(id string) *Index { return r } -// Index Name of the data stream or index to target. +// Index The name of the data stream or index to target. 
+// If the target doesn't exist and matches the name or wildcard (`*`) pattern of +// an index template with a `data_stream` definition, this request creates the +// data stream. +// If the target doesn't exist and doesn't match a data stream template, this +// request creates the index. +// You can check for existing targets with the resolve index API. // API Name: index func (r *Index) _index(index string) *Index { r.paramSet |= indexMask @@ -376,14 +770,23 @@ func (r *Index) IfSeqNo(sequencenumber string) *Index { return r } -// OpType Set to create to only index the document if it does not already exist (put if -// absent). +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Index) IncludeSourceOnError(includesourceonerror bool) *Index { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// OpType Set to `create` to only index the document if it does not already exist (put +// if absent). // If a document with the specified `_id` already exists, the indexing operation // will fail. -// Same as using the `/_create` endpoint. -// Valid values: `index`, `create`. -// If document id is specified, it defaults to `index`. +// The behavior is the same as using the `/_create` endpoint. +// If a document ID is specified, this paramater defaults to `index`. // Otherwise, it defaults to `create`. +// If the request targets a data stream, an `op_type` of `create` is required. // API name: op_type func (r *Index) OpType(optype optype.OpType) *Index { r.values.Set("op_type", optype.String()) @@ -391,7 +794,7 @@ func (r *Index) OpType(optype optype.OpType) *Index { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. +// Pipeline The ID of the pipeline to use to preprocess incoming documents. 
// If the index has a default ingest pipeline specified, then setting the value // to `_none` disables the default ingest pipeline for this request. // If a final pipeline is configured it will always run, regardless of the value @@ -404,9 +807,10 @@ func (r *Index) Pipeline(pipeline string) *Index { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Index) Refresh(refresh refresh.Refresh) *Index { r.values.Set("refresh", refresh.String()) @@ -414,7 +818,7 @@ func (r *Index) Refresh(refresh refresh.Refresh) *Index { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Index) Routing(routing string) *Index { r.values.Set("routing", routing) @@ -422,8 +826,16 @@ func (r *Index) Routing(routing string) *Index { return r } -// Timeout Period the request waits for the following operations: automatic index +// Timeout The period the request waits for the following operations: automatic index // creation, dynamic mapping updates, waiting for active shards. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the operation might not be available when the operation runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a gateway or undergoing relocation. +// By default, the operation will wait on the primary shard to become available +// for at least 1 minute before failing and responding with an error. 
+// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Index) Timeout(duration string) *Index { r.values.Set("timeout", duration) @@ -431,9 +843,8 @@ func (r *Index) Timeout(duration string) *Index { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version An explicit version number for concurrency control. +// It must be a non-negative long number. // API name: version func (r *Index) Version(versionnumber string) *Index { r.values.Set("version", versionnumber) @@ -441,7 +852,7 @@ func (r *Index) Version(versionnumber string) *Index { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { r.values.Set("version_type", versiontype.String()) @@ -451,8 +862,9 @@ func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Index) WaitForActiveShards(waitforactiveshards string) *Index { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/index/request.go b/typedapi/core/index/request.go index 8e3b832b17..e7bad748ba 100644 --- a/typedapi/core/index/request.go +++ b/typedapi/core/index/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package index @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package index // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/index/IndexRequest.ts#L35-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/index/IndexRequest.ts#L35-L268 type Request = json.RawMessage // NewRequest returns a Request diff --git a/typedapi/core/index/response.go b/typedapi/core/index/response.go index 347dfbe199..5f9874529c 100644 --- a/typedapi/core/index/response.go +++ b/typedapi/core/index/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package index @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package index // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/index/IndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/index/IndexResponse.ts#L22-L24 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + 
ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/info/info.go b/typedapi/core/info/info.go index 56052df2c5..334cdb0bd4 100644 --- a/typedapi/core/info/info.go +++ b/typedapi/core/info/info.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get cluster info. -// Returns basic information about the cluster. +// Get basic build, version, and cluster information. package info import ( @@ -70,9 +70,9 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } // Get cluster info. -// Returns basic information about the cluster. +// Get basic build, version, and cluster information. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, diff --git a/typedapi/core/info/response.go b/typedapi/core/info/response.go index b098544d82..f62c11b0b5 100644 --- a/typedapi/core/info/response.go +++ b/typedapi/core/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package info @@ -26,13 +26,17 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/info/RootNodeInfoResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/info/RootNodeInfoResponse.ts#L23-L40 type Response struct { - ClusterName string `json:"cluster_name"` - ClusterUuid string `json:"cluster_uuid"` - Name string `json:"name"` - Tagline string `json:"tagline"` - Version types.ElasticsearchVersionInfo `json:"version"` + + // ClusterName The responding cluster's name. + ClusterName string `json:"cluster_name"` + ClusterUuid string `json:"cluster_uuid"` + // Name The responding node's name. + Name string `json:"name"` + Tagline string `json:"tagline"` + // Version The running version of Elasticsearch. 
+ Version types.ElasticsearchVersionInfo `json:"version"` } // NewResponse returns a Response diff --git a/typedapi/core/knnsearch/knn_search.go b/typedapi/core/knnsearch/knn_search.go index e110958e70..1653a7b256 100644 --- a/typedapi/core/knnsearch/knn_search.go +++ b/typedapi/core/knnsearch/knn_search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a knn search. // @@ -35,6 +35,15 @@ // // The kNN search API supports restricting the search using a filter. // The search will return the top k documents that also match the filter query. +// +// A kNN search response has the exact same structure as a search API response. +// However, certain sections have a meaning specific to kNN search: +// +// * The document `_score` is determined by the similarity between the query and +// document vector. +// * The `hits.total` object contains the total number of nearest neighbor +// candidates considered, which is `num_candidates * num_shards`. The +// `hits.total.relation` will always be `eq`, indicating an exact value. package knnsearch import ( @@ -115,7 +124,16 @@ func NewKnnSearchFunc(tp elastictransport.Interface) NewKnnSearch { // The kNN search API supports restricting the search using a filter. // The search will return the top k documents that also match the filter query. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html +// A kNN search response has the exact same structure as a search API response. +// However, certain sections have a meaning specific to kNN search: +// +// * The document `_score` is determined by the similarity between the query and +// document vector. 
+// * The `hits.total` object contains the total number of nearest neighbor +// candidates considered, which is `num_candidates * num_shards`. The +// `hits.total.relation` will always be `eq`, indicating an exact value. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html func New(tp elastictransport.Interface) *KnnSearch { r := &KnnSearch{ transport: tp, @@ -123,8 +141,6 @@ func New(tp elastictransport.Interface) *KnnSearch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -337,7 +353,7 @@ func (r *KnnSearch) Header(key, value string) *KnnSearch { } // Index A comma-separated list of index names to search; -// use `_all` or to perform the operation on all indices +// use `_all` or to perform the operation on all indices. // API Name: index func (r *KnnSearch) _index(index string) *KnnSearch { r.paramSet |= indexMask @@ -346,7 +362,7 @@ func (r *KnnSearch) _index(index string) *KnnSearch { return r } -// Routing A comma-separated list of specific routing values +// Routing A comma-separated list of specific routing values. // API name: routing func (r *KnnSearch) Routing(routing string) *KnnSearch { r.values.Set("routing", routing) @@ -398,63 +414,99 @@ func (r *KnnSearch) Pretty(pretty bool) *KnnSearch { return r } -// DocvalueFields The request returns doc values for field names matching these patterns -// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// The request returns doc values for field names matching these patterns +// in the `hits.fields` property of the response. +// It accepts wildcard (`*`) patterns. 
// API name: docvalue_fields -func (r *KnnSearch) DocvalueFields(docvaluefields ...types.FieldAndFormat) *KnnSearch { - r.req.DocvalueFields = docvaluefields +func (r *KnnSearch) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Fields The request returns values for field names matching these patterns -// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// The request returns values for field names matching these patterns +// in the `hits.fields` property of the response. +// It accepts wildcard (`*`) patterns. // API name: fields func (r *KnnSearch) Fields(fields ...string) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// Filter Query to filter the documents that can match. The kNN search will return the -// top +// A query to filter the documents that can match. The kNN search will return +// the top // `k` documents that also match this filter. The value can be a single query or // a // list of queries. If `filter` isn't provided, all documents are allowed to // match. // API name: filter -func (r *KnnSearch) Filter(filters ...types.Query) *KnnSearch { - r.req.Filter = filters +func (r *KnnSearch) Filter(filters ...types.QueryVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + r.req.Filter[i] = *v.QueryCaster() + } return r } -// Knn kNN query to execute +// The kNN query to run. 
// API name: knn -func (r *KnnSearch) Knn(knn *types.CoreKnnQuery) *KnnSearch { +func (r *KnnSearch) Knn(knn types.CoreKnnQueryVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Knn = *knn + r.req.Knn = *knn.CoreKnnQueryCaster() return r } -// Source_ Indicates which source fields are returned for matching documents. These -// fields are returned in the hits._source property of the search response. +// Indicates which source fields are returned for matching documents. These +// fields are returned in the `hits._source` property of the search response. // API name: _source -func (r *KnnSearch) Source_(sourceconfig types.SourceConfig) *KnnSearch { - r.req.Source_ = sourceconfig +func (r *KnnSearch) Source_(sourceconfig types.SourceConfigVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// A list of stored fields to return as part of a hit. If no fields are +// specified, // no stored fields are included in the response. If this field is specified, -// the _source -// parameter defaults to false. You can pass _source: true to return both source -// fields +// the `_source` +// parameter defaults to `false`. You can pass `_source: true` to return both +// source fields // and stored fields in the search response. 
// API name: stored_fields func (r *KnnSearch) StoredFields(fields ...string) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r diff --git a/typedapi/core/knnsearch/request.go b/typedapi/core/knnsearch/request.go index b1afd0fcb1..c3872672a0 100644 --- a/typedapi/core/knnsearch/request.go +++ b/typedapi/core/knnsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package knnsearch @@ -32,32 +32,35 @@ import ( // Request holds the request body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/knn_search/KnnSearchRequest.ts#L26-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/knn_search/KnnSearchRequest.ts#L26-L112 type Request struct { // DocvalueFields The request returns doc values for field names matching these patterns - // in the hits.fields property of the response. Accepts wildcard (*) patterns. + // in the `hits.fields` property of the response. + // It accepts wildcard (`*`) patterns. DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` // Fields The request returns values for field names matching these patterns - // in the hits.fields property of the response. Accepts wildcard (*) patterns. + // in the `hits.fields` property of the response. + // It accepts wildcard (`*`) patterns. Fields []string `json:"fields,omitempty"` - // Filter Query to filter the documents that can match. 
The kNN search will return the - // top + // Filter A query to filter the documents that can match. The kNN search will return + // the top // `k` documents that also match this filter. The value can be a single query or // a // list of queries. If `filter` isn't provided, all documents are allowed to // match. Filter []types.Query `json:"filter,omitempty"` - // Knn kNN query to execute + // Knn The kNN query to run. Knn types.CoreKnnQuery `json:"knn"` // Source_ Indicates which source fields are returned for matching documents. These - // fields are returned in the hits._source property of the search response. + // fields are returned in the `hits._source` property of the search response. Source_ types.SourceConfig `json:"_source,omitempty"` - // StoredFields List of stored fields to return as part of a hit. If no fields are specified, + // StoredFields A list of stored fields to return as part of a hit. If no fields are + // specified, // no stored fields are included in the response. If this field is specified, - // the _source - // parameter defaults to false. You can pass _source: true to return both source - // fields + // the `_source` + // parameter defaults to `false`. You can pass `_source: true` to return both + // source fields // and stored fields in the search response. StoredFields []string `json:"stored_fields,omitempty"` } diff --git a/typedapi/core/knnsearch/response.go b/typedapi/core/knnsearch/response.go index c7eff86e72..eaad772fef 100644 --- a/typedapi/core/knnsearch/response.go +++ b/typedapi/core/knnsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package knnsearch @@ -28,23 +28,23 @@ import ( // Response holds the response body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 type Response struct { - // Fields Contains field values for the documents. These fields + // Fields The field values for the documents. These fields // must be specified in the request using the `fields` parameter. Fields map[string]json.RawMessage `json:"fields,omitempty"` - // Hits Contains returned documents and metadata. + // Hits The returned documents and metadata. Hits types.HitsMetadata `json:"hits"` - // MaxScore Highest returned document score. This value is null for requests + // MaxScore The highest returned document score. This value is null for requests // that do not sort by score. MaxScore *types.Float64 `json:"max_score,omitempty"` - // Shards_ Contains a count of shards used for the request. + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` // TimedOut If true, the request timed out before completion; // returned results may be partial or empty. TimedOut bool `json:"timed_out"` - // Took Milliseconds it took Elasticsearch to execute the request. + // Took The milliseconds it took Elasticsearch to run the request. Took int64 `json:"took"` } diff --git a/typedapi/core/mget/mget.go b/typedapi/core/mget/mget.go index 151b3bc4ae..f8b66f15df 100644 --- a/typedapi/core/mget/mget.go +++ b/typedapi/core/mget/mget.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get multiple documents. // @@ -25,6 +25,23 @@ // document IDs in the request body. // To ensure fast responses, this multi get (mget) API responds with partial // results if one or more shards fail. +// +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). +// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. +// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. package mget import ( @@ -93,7 +110,24 @@ func NewMgetFunc(tp elastictransport.Interface) NewMget { // To ensure fast responses, this multi get (mget) API responds with partial // results if one or more shards fail. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). +// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. 
+// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget func New(tp elastictransport.Interface) *Mget { r := &Mget{ transport: tp, @@ -101,8 +135,6 @@ func New(tp elastictransport.Interface) *Mget { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -458,19 +490,31 @@ func (r *Mget) Pretty(pretty bool) *Mget { return r } -// Docs The documents you want to retrieve. Required if no index is specified in the +// The documents you want to retrieve. Required if no index is specified in the // request URI. // API name: docs -func (r *Mget) Docs(docs ...types.MgetOperation) *Mget { - r.req.Docs = docs +func (r *Mget) Docs(docs ...types.MgetOperationVariant) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.MgetOperationCaster()) + } return r } -// Ids The IDs of the documents you want to retrieve. Allowed when the index is +// The IDs of the documents you want to retrieve. Allowed when the index is // specified in the request URI. 
// API name: ids func (r *Mget) Ids(ids ...string) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r diff --git a/typedapi/core/mget/request.go b/typedapi/core/mget/request.go index 8b3ac75549..14a4725352 100644 --- a/typedapi/core/mget/request.go +++ b/typedapi/core/mget/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mget @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mget/MultiGetRequest.ts#L25-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mget/MultiGetRequest.ts#L25-L127 type Request struct { // Docs The documents you want to retrieve. Required if no index is specified in the diff --git a/typedapi/core/mget/response.go b/typedapi/core/mget/response.go index 95c3864b14..6ac72d83f0 100644 --- a/typedapi/core/mget/response.go +++ b/typedapi/core/mget/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mget @@ -32,8 +32,15 @@ import ( // Response holds the response body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mget/MultiGetResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mget/MultiGetResponse.ts#L22-L31 type Response struct { + + // Docs The response includes a docs array that contains the documents in the order + // specified in the request. + // The structure of the returned documents is similar to that returned by the + // get API. + // If there is a failure getting a particular document, the error is included in + // place of the document. Docs []types.MgetResponseItem `json:"docs"` } diff --git a/typedapi/core/msearch/msearch.go b/typedapi/core/msearch/msearch.go index 6af4f4468c..a23e430bd8 100644 --- a/typedapi/core/msearch/msearch.go +++ b/typedapi/core/msearch/msearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run multiple searches. // @@ -121,7 +121,7 @@ func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { // When sending requests to this endpoint the `Content-Type` header should be // set to `application/x-ndjson`. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch func New(tp elastictransport.Interface) *Msearch { r := &Msearch{ transport: tp, diff --git a/typedapi/core/msearch/request.go b/typedapi/core/msearch/request.go index 5344e59dc2..6a8c54beaa 100644 --- a/typedapi/core/msearch/request.go +++ b/typedapi/core/msearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearch @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/MultiSearchRequest.ts#L25-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/MultiSearchRequest.ts#L25-L135 type Request = []types.MsearchRequestItem diff --git a/typedapi/core/msearch/response.go b/typedapi/core/msearch/response.go index 5557e95c57..039591e99a 100644 --- a/typedapi/core/msearch/response.go +++ b/typedapi/core/msearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearch @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/MultiSearchResponse.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/MultiSearchResponse.ts#L25-L27 type Response struct { Responses []types.MsearchResponseItem `json:"responses"` Took int64 `json:"took"` diff --git a/typedapi/core/msearchtemplate/msearch_template.go b/typedapi/core/msearchtemplate/msearch_template.go index c4a87508fe..8db4cf768d 100644 --- a/typedapi/core/msearchtemplate/msearch_template.go +++ b/typedapi/core/msearchtemplate/msearch_template.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. 
+// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` package msearchtemplate import ( @@ -82,7 +99,24 @@ func NewMsearchTemplateFunc(tp elastictransport.Interface) NewMsearchTemplate { // Run multiple templated searches. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. +// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template func New(tp elastictransport.Interface) *MsearchTemplate { r := &MsearchTemplate{ transport: tp, @@ -317,8 +351,8 @@ func (r *MsearchTemplate) Header(key, value string) *MsearchTemplate { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*`. 
// API Name: index func (r *MsearchTemplate) Index(index string) *MsearchTemplate { @@ -337,7 +371,7 @@ func (r *MsearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Mse return r } -// MaxConcurrentSearches Maximum number of concurrent searches the API can run. +// MaxConcurrentSearches The maximum number of concurrent searches the API can run. // API name: max_concurrent_searches func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *MsearchTemplate { r.values.Set("max_concurrent_searches", maxconcurrentsearches) @@ -346,7 +380,6 @@ func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *M } // SearchType The type of the search operation. -// Available options: `query_then_fetch`, `dfs_query_then_fetch`. // API name: search_type func (r *MsearchTemplate) SearchType(searchtype searchtype.SearchType) *MsearchTemplate { r.values.Set("search_type", searchtype.String()) diff --git a/typedapi/core/msearchtemplate/request.go b/typedapi/core/msearchtemplate/request.go index e9bcead078..4b6cd5f1eb 100644 --- a/typedapi/core/msearchtemplate/request.go +++ b/typedapi/core/msearchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearchtemplate @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L116 type Request = []types.RequestItem diff --git a/typedapi/core/msearchtemplate/response.go b/typedapi/core/msearchtemplate/response.go index c466364917..629099926d 100644 --- a/typedapi/core/msearchtemplate/response.go +++ b/typedapi/core/msearchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearchtemplate @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package msearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L31 type Response struct { Responses []types.MsearchResponseItem `json:"responses"` Took int64 `json:"took"` diff --git a/typedapi/core/mtermvectors/mtermvectors.go b/typedapi/core/mtermvectors/mtermvectors.go index 987e152850..7b8d608e38 100644 --- a/typedapi/core/mtermvectors/mtermvectors.go +++ b/typedapi/core/mtermvectors/mtermvectors.go @@ -16,15 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get multiple term vectors. // +// Get multiple term vectors with a single request. // You can specify existing documents by index and ID or provide artificial // documents in the body of the request. // You can specify the index in the request body or request URI. // The response contains a `docs` array with all the fetched termvectors. // Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. 
package mtermvectors import ( @@ -88,13 +95,20 @@ func NewMtermvectorsFunc(tp elastictransport.Interface) NewMtermvectors { // Get multiple term vectors. // +// Get multiple term vectors with a single request. // You can specify existing documents by index and ID or provide artificial // documents in the body of the request. // You can specify the index in the request body or request URI. // The response contains a `docs` array with all the fetched termvectors. // Each element has the structure provided by the termvectors API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors func New(tp elastictransport.Interface) *Mtermvectors { r := &Mtermvectors{ transport: tp, @@ -102,8 +116,6 @@ func New(tp elastictransport.Interface) *Mtermvectors { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -320,7 +332,7 @@ func (r *Mtermvectors) Header(key, value string) *Mtermvectors { return r } -// Index Name of the index that contains the documents. +// Index The name of the index that contains the documents. // API Name: index func (r *Mtermvectors) Index(index string) *Mtermvectors { r.paramSet |= indexMask @@ -329,10 +341,10 @@ func (r *Mtermvectors) Index(index string) *Mtermvectors { return r } -// Fields Comma-separated list or wildcard expressions of fields to include in the +// Fields A comma-separated list or wildcard expressions of fields to include in the // statistics. -// Used as the default list unless a specific field list is provided in the -// `completion_fields` or `fielddata_fields` parameters. 
+// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. // API name: fields func (r *Mtermvectors) Fields(fields ...string) *Mtermvectors { r.values.Set("fields", strings.Join(fields, ",")) @@ -373,8 +385,8 @@ func (r *Mtermvectors) Positions(positions bool) *Mtermvectors { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *Mtermvectors) Preference(preference string) *Mtermvectors { r.values.Set("preference", preference) @@ -390,7 +402,7 @@ func (r *Mtermvectors) Realtime(realtime bool) *Mtermvectors { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Mtermvectors) Routing(routing string) *Mtermvectors { r.values.Set("routing", routing) @@ -414,7 +426,7 @@ func (r *Mtermvectors) Version(versionnumber string) *Mtermvectors { return r } -// VersionType Specific version type. +// VersionType The version type. // API name: version_type func (r *Mtermvectors) VersionType(versiontype versiontype.VersionType) *Mtermvectors { r.values.Set("version_type", versiontype.String()) @@ -466,19 +478,33 @@ func (r *Mtermvectors) Pretty(pretty bool) *Mtermvectors { return r } -// Docs Array of existing or artificial documents. +// An array of existing or artificial documents. 
// API name: docs -func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperation) *Mtermvectors { - r.req.Docs = docs +func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperationVariant) *Mtermvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + r.req.Docs = append(r.req.Docs, *v.MTermVectorsOperationCaster()) + + } return r } -// Ids Simplified syntax to specify documents by their ID if they're in the same +// A simplified syntax to specify documents by their ID if they're in the same // index. // API name: ids func (r *Mtermvectors) Ids(ids ...string) *Mtermvectors { - r.req.Ids = ids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + r.req.Ids = append(r.req.Ids, v) + + } return r } diff --git a/typedapi/core/mtermvectors/request.go b/typedapi/core/mtermvectors/request.go index fb4d62498f..bede681710 100644 --- a/typedapi/core/mtermvectors/request.go +++ b/typedapi/core/mtermvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mtermvectors @@ -29,12 +29,12 @@ import ( // Request holds the request body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L134 type Request struct { - // Docs Array of existing or artificial documents. 
+ // Docs An array of existing or artificial documents. Docs []types.MTermVectorsOperation `json:"docs,omitempty"` - // Ids Simplified syntax to specify documents by their ID if they're in the same + // Ids A simplified syntax to specify documents by their ID if they're in the same // index. Ids []string `json:"ids,omitempty"` } diff --git a/typedapi/core/mtermvectors/response.go b/typedapi/core/mtermvectors/response.go index e75b41a6b4..b89a6a48c8 100644 --- a/typedapi/core/mtermvectors/response.go +++ b/typedapi/core/mtermvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mtermvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 type Response struct { Docs []types.TermVectorsResult `json:"docs"` } diff --git a/typedapi/core/openpointintime/open_point_in_time.go b/typedapi/core/openpointintime/open_point_in_time.go index bc4c9ead69..316ead2b5b 100644 --- a/typedapi/core/openpointintime/open_point_in_time.go +++ b/typedapi/core/openpointintime/open_point_in_time.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Open a point in time. // @@ -34,7 +34,53 @@ // // A point in time must be opened explicitly before being used in search // requests. -// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// +// A subsequent search request with the `pit` parameter must not specify +// `index`, `routing`, or `preference` values as these parameters are copied +// from the point in time. +// +// Just like regular searches, you can use `from` and `size` to page through +// point in time search results, up to the first 10,000 hits. +// If you want to retrieve more hits, use PIT with `search_after`. +// +// IMPORTANT: The open point in time request and each subsequent search request +// can return different identifiers; always use the most recently received ID +// for the next search request. +// +// When a PIT that contains shard failures is used in a search request, the +// missing are always reported in the search response as a +// `NoShardAvailableActionException` exception. +// To get rid of these exceptions, a new PIT needs to be created so that shards +// missing from the previous PIT can be handled, assuming they become available +// in the meantime. +// +// **Keeping point in time alive** +// +// The `keep_alive` parameter, which is passed to a open point in time request +// and search request, extends the time to live of the corresponding point in +// time. +// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. +// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. 
+// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. package openpointintime import ( @@ -114,9 +160,55 @@ func NewOpenPointInTimeFunc(tp elastictransport.Interface) NewOpenPointInTime { // // A point in time must be opened explicitly before being used in search // requests. -// The `keep_alive` parameter tells Elasticsearch how long it should persist. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html +// A subsequent search request with the `pit` parameter must not specify +// `index`, `routing`, or `preference` values as these parameters are copied +// from the point in time. +// +// Just like regular searches, you can use `from` and `size` to page through +// point in time search results, up to the first 10,000 hits. +// If you want to retrieve more hits, use PIT with `search_after`. +// +// IMPORTANT: The open point in time request and each subsequent search request +// can return different identifiers; always use the most recently received ID +// for the next search request. 
+// +// When a PIT that contains shard failures is used in a search request, the +// missing are always reported in the search response as a +// `NoShardAvailableActionException` exception. +// To get rid of these exceptions, a new PIT needs to be created so that shards +// missing from the previous PIT can be handled, assuming they become available +// in the meantime. +// +// **Keeping point in time alive** +// +// The `keep_alive` parameter, which is passed to a open point in time request +// and search request, extends the time to live of the corresponding point in +// time. +// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. +// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time func New(tp elastictransport.Interface) *OpenPointInTime { r := &OpenPointInTime{ transport: tp, @@ -124,8 +216,6 @@ func New(tp elastictransport.Interface) *OpenPointInTime { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -347,7 +437,7 @@ func (r *OpenPointInTime) _index(index string) *OpenPointInTime { return r } -// KeepAlive Extends the time to live of the corresponding point in time. +// KeepAlive Extend the length of time that the point in time persists. // API name: keep_alive func (r *OpenPointInTime) KeepAlive(duration string) *OpenPointInTime { r.values.Set("keep_alive", duration) @@ -364,8 +454,8 @@ func (r *OpenPointInTime) IgnoreUnavailable(ignoreunavailable bool) *OpenPointIn return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, it is random. // API name: preference func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { r.values.Set("preference", preference) @@ -373,7 +463,7 @@ func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { r.values.Set("routing", routing) @@ -381,10 +471,10 @@ func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. 
// If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. Valid values are: +// It supports comma-separated values, such as `open,hidden`. Valid values are: // `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *OpenPointInTime { @@ -397,6 +487,19 @@ func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.Expa return r } +// AllowPartialSearchResults Indicates whether the point in time tolerates unavailable shards or shard +// failures when initially creating the PIT. +// If `false`, creating a point in time request when a shard is missing or +// unavailable will throw an exception. +// If `true`, the point in time will contain all the shards that are available +// at the time of the request. +// API name: allow_partial_search_results +func (r *OpenPointInTime) AllowPartialSearchResults(allowpartialsearchresults bool) *OpenPointInTime { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -441,12 +544,15 @@ func (r *OpenPointInTime) Pretty(pretty bool) *OpenPointInTime { return r } -// IndexFilter Allows to filter indices if the provided query rewrites to `match_none` on -// every shard. +// Filter indices if the provided query rewrites to `match_none` on every shard. 
// API name: index_filter -func (r *OpenPointInTime) IndexFilter(indexfilter *types.Query) *OpenPointInTime { +func (r *OpenPointInTime) IndexFilter(indexfilter types.QueryVariant) *OpenPointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexFilter = indexfilter + r.req.IndexFilter = indexfilter.QueryCaster() return r } diff --git a/typedapi/core/openpointintime/request.go b/typedapi/core/openpointintime/request.go index f8e4f1c841..8b7be75c04 100644 --- a/typedapi/core/openpointintime/request.go +++ b/typedapi/core/openpointintime/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package openpointintime @@ -29,11 +29,10 @@ import ( // Request holds the request body struct for the package openpointintime // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/open_point_in_time/OpenPointInTimeRequest.ts#L25-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/open_point_in_time/OpenPointInTimeRequest.ts#L25-L121 type Request struct { - // IndexFilter Allows to filter indices if the provided query rewrites to `match_none` on - // every shard. + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. IndexFilter *types.Query `json:"index_filter,omitempty"` } diff --git a/typedapi/core/openpointintime/response.go b/typedapi/core/openpointintime/response.go index db125717b6..d55acad4bb 100644 --- a/typedapi/core/openpointintime/response.go +++ b/typedapi/core/openpointintime/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package openpointintime @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package openpointintime // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L23-L29 type Response struct { Id string `json:"id"` // Shards_ Shards used to create the PIT diff --git a/typedapi/core/ping/ping.go b/typedapi/core/ping/ping.go index 8d8e72fe98..24c5fa2662 100644 --- a/typedapi/core/ping/ping.go +++ b/typedapi/core/ping/ping.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Ping the cluster. -// Returns whether the cluster is running. +// Get information about whether the cluster is running. package ping import ( @@ -68,9 +68,9 @@ func NewPingFunc(tp elastictransport.Interface) NewPing { } // Ping the cluster. -// Returns whether the cluster is running. +// Get information about whether the cluster is running. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster func New(tp elastictransport.Interface) *Ping { r := &Ping{ transport: tp, diff --git a/typedapi/core/putscript/put_script.go b/typedapi/core/putscript/put_script.go index 0bdd4f9310..06b95746f1 100644 --- a/typedapi/core/putscript/put_script.go +++ b/typedapi/core/putscript/put_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update a script or search template. // Creates or updates a stored script or search template. @@ -88,7 +88,7 @@ func NewPutScriptFunc(tp elastictransport.Interface) NewPutScript { // Create or update a script or search template. // Creates or updates a stored script or search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script func New(tp elastictransport.Interface) *PutScript { r := &PutScript{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *PutScript { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -326,8 +324,8 @@ func (r *PutScript) Header(key, value string) *PutScript { return r } -// Id Identifier for the stored script or search template. -// Must be unique within the cluster. +// Id The identifier for the stored script or search template. +// It must be unique within the cluster. 
// API Name: id func (r *PutScript) _id(id string) *PutScript { r.paramSet |= idMask @@ -336,7 +334,7 @@ func (r *PutScript) _id(id string) *PutScript { return r } -// Context Context in which the script or search template should run. +// Context The context in which the script or search template should run. // To prevent errors, the API immediately compiles the script or template in // this context. // API Name: context @@ -347,9 +345,10 @@ func (r *PutScript) Context(context string) *PutScript { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *PutScript) MasterTimeout(duration string) *PutScript { r.values.Set("master_timeout", duration) @@ -357,9 +356,10 @@ func (r *PutScript) MasterTimeout(duration string) *PutScript { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: timeout func (r *PutScript) Timeout(duration string) *PutScript { r.values.Set("timeout", duration) @@ -411,11 +411,15 @@ func (r *PutScript) Pretty(pretty bool) *PutScript { return r } -// Script Contains the script or search template, its parameters, and its language. +// The script or search template, its parameters, and its language. 
// API name: script -func (r *PutScript) Script(script *types.StoredScript) *PutScript { +func (r *PutScript) Script(script types.StoredScriptVariant) *PutScript { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = *script + r.req.Script = *script.StoredScriptCaster() return r } diff --git a/typedapi/core/putscript/request.go b/typedapi/core/putscript/request.go index 1b40b2e0c5..18a2c55798 100644 --- a/typedapi/core/putscript/request.go +++ b/typedapi/core/putscript/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putscript @@ -29,10 +29,10 @@ import ( // Request holds the request body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/put_script/PutScriptRequest.ts#L25-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/put_script/PutScriptRequest.ts#L25-L87 type Request struct { - // Script Contains the script or search template, its parameters, and its language. + // Script The script or search template, its parameters, and its language. Script types.StoredScript `json:"script"` } diff --git a/typedapi/core/putscript/response.go b/typedapi/core/putscript/response.go index 59f3f1b34e..deb66d0185 100644 --- a/typedapi/core/putscript/response.go +++ b/typedapi/core/putscript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putscript // Response holds the response body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/put_script/PutScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/put_script/PutScriptResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/core/rankeval/rank_eval.go b/typedapi/core/rankeval/rank_eval.go index 2357fe94a4..2b5a3a5874 100644 --- a/typedapi/core/rankeval/rank_eval.go +++ b/typedapi/core/rankeval/rank_eval.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Evaluate ranked search results. // @@ -88,7 +88,7 @@ func NewRankEvalFunc(tp elastictransport.Interface) NewRankEval { // Evaluate the quality of ranked search results over a set of typical search // queries. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval func New(tp elastictransport.Interface) *RankEval { r := &RankEval{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *RankEval { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -314,8 +312,9 @@ func (r *RankEval) Header(key, value string) *RankEval { return r } -// Index Comma-separated list of data streams, indices, and index aliases used to -// limit the request. Wildcard (`*`) expressions are supported. +// Index A comma-separated list of data streams, indices, and index aliases used to +// limit the request. +// Wildcard (`*`) expressions are supported. // To target all data streams and indices in a cluster, omit this parameter or // use `_all` or `*`. // API Name: index @@ -411,19 +410,30 @@ func (r *RankEval) Pretty(pretty bool) *RankEval { return r } -// Metric Definition of the evaluation metric to calculate. +// Definition of the evaluation metric to calculate. // API name: metric -func (r *RankEval) Metric(metric *types.RankEvalMetric) *RankEval { +func (r *RankEval) Metric(metric types.RankEvalMetricVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Metric = metric + r.req.Metric = metric.RankEvalMetricCaster() return r } -// Requests A set of typical search requests, together with their provided ratings. +// A set of typical search requests, together with their provided ratings. 
// API name: requests -func (r *RankEval) Requests(requests ...types.RankEvalRequestItem) *RankEval { - r.req.Requests = requests +func (r *RankEval) Requests(requests ...types.RankEvalRequestItemVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range requests { + r.req.Requests = append(r.req.Requests, *v.RankEvalRequestItemCaster()) + + } return r } diff --git a/typedapi/core/rankeval/request.go b/typedapi/core/rankeval/request.go index 746bf36c4b..65a9feccd6 100644 --- a/typedapi/core/rankeval/request.go +++ b/typedapi/core/rankeval/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rankeval @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/RankEvalRequest.ts#L24-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/RankEvalRequest.ts#L24-L76 type Request struct { // Metric Definition of the evaluation metric to calculate. diff --git a/typedapi/core/rankeval/response.go b/typedapi/core/rankeval/response.go index 971d7aa599..7ae6f3c426 100644 --- a/typedapi/core/rankeval/response.go +++ b/typedapi/core/rankeval/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rankeval @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 type Response struct { // Details The details section contains one entry for every query in the original diff --git a/typedapi/core/reindex/reindex.go b/typedapi/core/reindex/reindex.go index 24e722d98e..9bd8264b29 100644 --- a/typedapi/core/reindex/reindex.go +++ b/typedapi/core/reindex/reindex.go @@ -16,12 +16,289 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Reindex documents. -// Copies documents from a source to a destination. The source can be any -// existing index, alias, or data stream. The destination must differ from the -// source. For example, you cannot reindex a data stream into itself. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. 
+// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. 
+// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// NOTE: The reindex API makes no effort to handle ID collisions. +// The last document written will "win" but the order isn't usually predictable +// so it is not a good idea to rely on this behavior. +// Instead, make sure that IDs are unique by using a script. +// +// **Running reindex asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// +// **Reindex from multiple sources** +// +// If you have many sources to reindex it is generally better to reindex them +// one at a time rather than using a glob pattern to pick up multiple sources. +// That way you can resume the process if there are any errors by removing the +// partially completed source and starting over. 
+// It also makes parallelizing the process fairly simple: split the list of +// sources to reindex and run each list in parallel. +// +// For example, you can use a bash script like this: +// +// ``` +// for index in i1 i2 i3 i4 i5; do +// +// curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty +// +// -d'{ +// "source": { +// "index": "'$index'" +// }, +// "dest": { +// "index": "'$index'-reindexed" +// } +// }' +// +// done +// ``` +// +// **Throttling** +// +// Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, +// for example) to throttle the rate at which reindex issues batches of index +// operations. +// Requests are throttled by padding each batch with a wait time. +// To turn off throttling, set `requests_per_second` to `-1`. +// +// The throttling is done by waiting between batches so that the scroll that +// reindex uses internally can be given a timeout that takes into account the +// padding. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single bulk request, large batch sizes cause +// Elasticsearch to create many requests and then wait for a while before +// starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. 
+// +// You can slice a reindex request manually by providing a slice ID and total +// number of slices to each request. +// You can also let reindex automatically parallelize by using sliced scroll to +// slice on `_id`. +// The `slices` parameter specifies the number of slices to use. +// +// Adding `slices` to the reindex request just automates the manual process, +// creating sub-requests which means it has some quirks: +// +// * You can see these requests in the tasks API. These sub-requests are "child" +// tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices`, each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the previous point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being reindexed. +// * Each sub-request gets a slightly different snapshot of the source, though +// these are all taken at approximately the same time. +// +// If slicing automatically, setting `slices` to `auto` will choose a reasonable +// number for most indices. +// If slicing manually or otherwise tuning automatic slicing, use the following +// guidelines. 
+// +// Query performance is most efficient when the number of slices is equal to the +// number of shards in the index. +// If that number is large (for example, `500`), choose a lower number as too +// many slices will hurt performance. +// Setting slices higher than the number of shards generally does not improve +// efficiency and adds overhead. +// +// Indexing performance scales linearly across available resources with the +// number of slices. +// +// Whether query or indexing performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Modify documents during reindexing** +// +// Like `_update_by_query`, reindex operations support a script that modifies +// the document. +// Unlike `_update_by_query`, the script is allowed to modify the document's +// metadata. +// +// Just as in `_update_by_query`, you can set `ctx.op` to change the operation +// that is run on the destination. +// For example, set `ctx.op` to `noop` if your script decides that the document +// doesn’t have to be indexed in the destination. This "no operation" will be +// reported in the `noop` counter in the response body. +// Set `ctx.op` to `delete` if your script decides that the document must be +// deleted from the destination. +// The deletion will be reported in the `deleted` counter in the response body. +// Setting `ctx.op` to anything else will return an error, as will setting any +// other field in `ctx`. +// +// Think of the possibilities! Just be careful; you are able to change: +// +// * `_id` +// * `_index` +// * `_version` +// * `_routing` +// +// Setting `_version` to `null` or clearing it from the `ctx` map is just like +// not sending the version in an indexing request. +// It will cause the document to be overwritten in the destination regardless of +// the version on the target or the version type you use in the reindex API. 
+// +// **Reindex from remote** +// +// Reindex supports reindexing from a remote Elasticsearch cluster. +// The `host` parameter must contain a scheme, host, port, and optional path. +// The `username` and `password` parameters are optional and when they are +// present the reindex operation will connect to the remote Elasticsearch node +// using basic authentication. +// Be sure to use HTTPS when using basic authentication or the password will be +// sent in plain text. +// There are a range of settings available to configure the behavior of the +// HTTPS connection. +// +// When using Elastic Cloud, it is also possible to authenticate against the +// remote cluster through the use of a valid API key. +// Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` +// setting. +// It can be set to a comma delimited list of allowed remote host and port +// combinations. +// Scheme is ignored; only the host and port are used. +// For example: +// +// ``` +// reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, +// localhost:*"] +// ``` +// +// The list of allowed hosts must be configured on any nodes that will +// coordinate the reindex. +// This feature should work with remote clusters of any version of +// Elasticsearch. +// This should enable you to upgrade from any version of Elasticsearch to the +// current version by reindexing from a cluster of the old version. +// +// WARNING: Elasticsearch does not support forward compatibility across major +// versions. +// For example, you cannot reindex from a 7.x cluster into a 6.x cluster. +// +// To enable queries sent to older versions of Elasticsearch, the `query` +// parameter is sent directly to the remote host without validation or +// modification. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// Reindexing from a remote server uses an on-heap buffer that defaults to a +// maximum size of 100mb. 
+// If the remote index includes very large documents you'll need to use a +// smaller batch size. +// It is also possible to set the socket read timeout on the remote connection +// with the `socket_timeout` field and the connection timeout with the +// `connect_timeout` field. +// Both default to 30 seconds. +// +// **Configuring SSL parameters** +// +// Reindex from remote supports configurable SSL settings. +// These must be specified in the `elasticsearch.yml` file, with the exception +// of the secure settings, which you add in the Elasticsearch keystore. +// It is not possible to configure SSL in the body of the reindex request. package reindex import ( @@ -78,11 +355,288 @@ func NewReindexFunc(tp elastictransport.Interface) NewReindex { } // Reindex documents. -// Copies documents from a source to a destination. The source can be any -// existing index, alias, or data stream. The destination must differ from the -// source. For example, you cannot reindex a data stream into itself. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. +// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. 
+// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. 
+// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// NOTE: The reindex API makes no effort to handle ID collisions. +// The last document written will "win" but the order isn't usually predictable +// so it is not a good idea to rely on this behavior. +// Instead, make sure that IDs are unique by using a script. +// +// **Running reindex asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// +// **Reindex from multiple sources** +// +// If you have many sources to reindex it is generally better to reindex them +// one at a time rather than using a glob pattern to pick up multiple sources. +// That way you can resume the process if there are any errors by removing the +// partially completed source and starting over. +// It also makes parallelizing the process fairly simple: split the list of +// sources to reindex and run each list in parallel. 
+// +// For example, you can use a bash script like this: +// +// ``` +// for index in i1 i2 i3 i4 i5; do +// +// curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty +// +// -d'{ +// "source": { +// "index": "'$index'" +// }, +// "dest": { +// "index": "'$index'-reindexed" +// } +// }' +// +// done +// ``` +// +// **Throttling** +// +// Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, +// for example) to throttle the rate at which reindex issues batches of index +// operations. +// Requests are throttled by padding each batch with a wait time. +// To turn off throttling, set `requests_per_second` to `-1`. +// +// The throttling is done by waiting between batches so that the scroll that +// reindex uses internally can be given a timeout that takes into account the +// padding. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single bulk request, large batch sizes cause +// Elasticsearch to create many requests and then wait for a while before +// starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// You can slice a reindex request manually by providing a slice ID and total +// number of slices to each request. +// You can also let reindex automatically parallelize by using sliced scroll to +// slice on `_id`. 
+// The `slices` parameter specifies the number of slices to use. +// +// Adding `slices` to the reindex request just automates the manual process, +// creating sub-requests which means it has some quirks: +// +// * You can see these requests in the tasks API. These sub-requests are "child" +// tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices`, each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the previous point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being reindexed. +// * Each sub-request gets a slightly different snapshot of the source, though +// these are all taken at approximately the same time. +// +// If slicing automatically, setting `slices` to `auto` will choose a reasonable +// number for most indices. +// If slicing manually or otherwise tuning automatic slicing, use the following +// guidelines. +// +// Query performance is most efficient when the number of slices is equal to the +// number of shards in the index. +// If that number is large (for example, `500`), choose a lower number as too +// many slices will hurt performance. 
+// Setting slices higher than the number of shards generally does not improve +// efficiency and adds overhead. +// +// Indexing performance scales linearly across available resources with the +// number of slices. +// +// Whether query or indexing performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Modify documents during reindexing** +// +// Like `_update_by_query`, reindex operations support a script that modifies +// the document. +// Unlike `_update_by_query`, the script is allowed to modify the document's +// metadata. +// +// Just as in `_update_by_query`, you can set `ctx.op` to change the operation +// that is run on the destination. +// For example, set `ctx.op` to `noop` if your script decides that the document +// doesn’t have to be indexed in the destination. This "no operation" will be +// reported in the `noop` counter in the response body. +// Set `ctx.op` to `delete` if your script decides that the document must be +// deleted from the destination. +// The deletion will be reported in the `deleted` counter in the response body. +// Setting `ctx.op` to anything else will return an error, as will setting any +// other field in `ctx`. +// +// Think of the possibilities! Just be careful; you are able to change: +// +// * `_id` +// * `_index` +// * `_version` +// * `_routing` +// +// Setting `_version` to `null` or clearing it from the `ctx` map is just like +// not sending the version in an indexing request. +// It will cause the document to be overwritten in the destination regardless of +// the version on the target or the version type you use in the reindex API. +// +// **Reindex from remote** +// +// Reindex supports reindexing from a remote Elasticsearch cluster. +// The `host` parameter must contain a scheme, host, port, and optional path. 
+// The `username` and `password` parameters are optional and when they are +// present the reindex operation will connect to the remote Elasticsearch node +// using basic authentication. +// Be sure to use HTTPS when using basic authentication or the password will be +// sent in plain text. +// There are a range of settings available to configure the behavior of the +// HTTPS connection. +// +// When using Elastic Cloud, it is also possible to authenticate against the +// remote cluster through the use of a valid API key. +// Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` +// setting. +// It can be set to a comma delimited list of allowed remote host and port +// combinations. +// Scheme is ignored; only the host and port are used. +// For example: +// +// ``` +// reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, +// localhost:*"] +// ``` +// +// The list of allowed hosts must be configured on any nodes that will +// coordinate the reindex. +// This feature should work with remote clusters of any version of +// Elasticsearch. +// This should enable you to upgrade from any version of Elasticsearch to the +// current version by reindexing from a cluster of the old version. +// +// WARNING: Elasticsearch does not support forward compatibility across major +// versions. +// For example, you cannot reindex from a 7.x cluster into a 6.x cluster. +// +// To enable queries sent to older versions of Elasticsearch, the `query` +// parameter is sent directly to the remote host without validation or +// modification. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// Reindexing from a remote server uses an on-heap buffer that defaults to a +// maximum size of 100mb. +// If the remote index includes very large documents you'll need to use a +// smaller batch size. 
+// It is also possible to set the socket read timeout on the remote connection +// with the `socket_timeout` field and the connection timeout with the +// `connect_timeout` field. +// Both default to 30 seconds. +// +// **Configuring SSL parameters** +// +// Reindex from remote supports configurable SSL settings. +// These must be specified in the `elasticsearch.yml` file, with the exception +// of the secure settings, which you add in the Elasticsearch keystore. +// It is not possible to configure SSL in the body of the reindex request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex func New(tp elastictransport.Interface) *Reindex { r := &Reindex{ transport: tp, @@ -90,8 +644,6 @@ func New(tp elastictransport.Interface) *Reindex { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -307,7 +859,7 @@ func (r *Reindex) Refresh(refresh bool) *Reindex { } // RequestsPerSecond The throttle for this request in sub-requests per second. -// Defaults to no throttle. +// By default, there is no throttle. // API name: requests_per_second func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { r.values.Set("requests_per_second", requestspersecond) @@ -315,8 +867,8 @@ func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { return r } -// Scroll Specifies how long a consistent view of the index should be maintained for -// scrolled search. +// Scroll The period of time that a consistent view of the index should be maintained +// for scrolled search. // API name: scroll func (r *Reindex) Scroll(duration string) *Reindex { r.values.Set("scroll", duration) @@ -325,7 +877,19 @@ func (r *Reindex) Scroll(duration string) *Reindex { } // Slices The number of slices this task should be divided into. -// Defaults to 1 slice, meaning the task isn’t sliced into subtasks. 
+// It defaults to one slice, which means the task isn't sliced into subtasks. +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// If set to `auto`, Elasticsearch chooses the number of slices to use. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple sources, it will choose the number of slices based on +// the index or backing index with the smallest number of shards. // API name: slices func (r *Reindex) Slices(slices string) *Reindex { r.values.Set("slices", slices) @@ -333,8 +897,10 @@ func (r *Reindex) Slices(slices string) *Reindex { return r } -// Timeout Period each indexing waits for automatic index creation, dynamic mapping +// Timeout The period each indexing waits for automatic index creation, dynamic mapping // updates, and waiting for active shards. +// By default, Elasticsearch waits for at least one minute before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Reindex) Timeout(duration string) *Reindex { r.values.Set("timeout", duration) @@ -344,8 +910,10 @@ func (r *Reindex) Timeout(duration string) *Reindex { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// Set it to `all` or any positive integer up to the total number of shards in +// the index (`number_of_replicas+1`). +// The default value is one, which means it waits for each primary shard to be +// active. 
// API name: wait_for_active_shards func (r *Reindex) WaitForActiveShards(waitforactiveshards string) *Reindex { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -413,54 +981,85 @@ func (r *Reindex) Pretty(pretty bool) *Reindex { return r } -// Conflicts Set to proceed to continue reindexing even if there are conflicts. +// Indicates whether to continue reindexing even when there are conflicts. // API name: conflicts func (r *Reindex) Conflicts(conflicts conflicts.Conflicts) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Conflicts = &conflicts - return r } -// Dest The destination you are copying to. +// The destination you are copying to. // API name: dest -func (r *Reindex) Dest(dest *types.ReindexDestination) *Reindex { +func (r *Reindex) Dest(dest types.ReindexDestinationVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.ReindexDestinationCaster() return r } -// MaxDocs The maximum number of documents to reindex. +// The maximum number of documents to reindex. +// By default, all documents are reindexed. +// If it is a value less then or equal to `scroll_size`, a scroll will not be +// used to retrieve the results for the operation. +// +// If `conflicts` is set to `proceed`, the reindex operation could attempt to +// reindex more documents from the source than `max_docs` until it has +// successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. // API name: max_docs func (r *Reindex) MaxDocs(maxdocs int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Script The script to run to update the document source or metadata when reindexing. 
+// The script to run to update the document source or metadata when reindexing. // API name: script -func (r *Reindex) Script(script *types.Script) *Reindex { +func (r *Reindex) Script(script types.ScriptVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } // API name: size func (r *Reindex) Size(size int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size return r } -// Source The source you are copying from. +// The source you are copying from. // API name: source -func (r *Reindex) Source(source *types.ReindexSource) *Reindex { +func (r *Reindex) Source(source types.ReindexSourceVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.ReindexSourceCaster() return r } diff --git a/typedapi/core/reindex/request.go b/typedapi/core/reindex/request.go index 981b644de7..6de5eab43a 100644 --- a/typedapi/core/reindex/request.go +++ b/typedapi/core/reindex/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reindex @@ -30,14 +30,22 @@ import ( // Request holds the request body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex/ReindexRequest.ts#L27-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex/ReindexRequest.ts#L27-L309 type Request struct { - // Conflicts Set to proceed to continue reindexing even if there are conflicts. + // Conflicts Indicates whether to continue reindexing even when there are conflicts. Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` // Dest The destination you are copying to. Dest types.ReindexDestination `json:"dest"` // MaxDocs The maximum number of documents to reindex. + // By default, all documents are reindexed. + // If it is a value less then or equal to `scroll_size`, a scroll will not be + // used to retrieve the results for the operation. + // + // If `conflicts` is set to `proceed`, the reindex operation could attempt to + // reindex more documents from the source than `max_docs` until it has + // successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. MaxDocs *int64 `json:"max_docs,omitempty"` // Script The script to run to update the document source or metadata when reindexing. Script *types.Script `json:"script,omitempty"` diff --git a/typedapi/core/reindex/response.go b/typedapi/core/reindex/response.go index a67f8c5189..6084d81236 100644 --- a/typedapi/core/reindex/response.go +++ b/typedapi/core/reindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reindex @@ -26,24 +26,53 @@ import ( // Response holds the response body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex/ReindexResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex/ReindexResponse.ts#L26-L92 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Created *int64 `json:"created,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - SliceId *int `json:"slice_id,omitempty"` - Task types.TaskId `json:"task,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - Updated *int64 `json:"updated,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses that were pulled back by the reindex. + Batches *int64 `json:"batches,omitempty"` + // Created The number of documents that were successfully created. + Created *int64 `json:"created,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures If there were any unrecoverable errors during the process, it is an array of + // those failures. 
+ // If this array is not empty, the request ended because of those failures. + // Reindex is implemented using batches and any failure causes the entire + // process to end but all failures in the current batch are collected into the + // array. + // You can use the `conflicts` option to prevent the reindex from ending on + // version conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // reindex returned a `noop` value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the reindex. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by reindex. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task types.TaskId `json:"task,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a reindex response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) that a throttled request will be run again in + // order to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If any of the requests that ran during the reindex timed out, it is `true`. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The total milliseconds the entire operation took. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. 
+ // That is to say, a document with the same ID already existed before the + // reindex updated it. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that occurred. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/reindexrethrottle/reindex_rethrottle.go b/typedapi/core/reindexrethrottle/reindex_rethrottle.go index 7ea80fa9e5..c384f3c775 100644 --- a/typedapi/core/reindexrethrottle/reindex_rethrottle.go +++ b/typedapi/core/reindexrethrottle/reindex_rethrottle.go @@ -16,11 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Throttle a reindex operation. // // Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. package reindexrethrottle import ( @@ -81,8 +91,18 @@ func NewReindexRethrottleFunc(tp elastictransport.Interface) NewReindexRethrottl // Throttle a reindex operation. // // Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex func New(tp elastictransport.Interface) *ReindexRethrottle { r := &ReindexRethrottle{ transport: tp, @@ -294,7 +314,7 @@ func (r *ReindexRethrottle) Header(key, value string) *ReindexRethrottle { return r } -// TaskId Identifier for the task. +// TaskId The task identifier, which can be found by using the tasks API. // API Name: taskid func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { r.paramSet |= taskidMask @@ -304,6 +324,8 @@ func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { } // RequestsPerSecond The throttle for this request in sub-requests per second. +// It can be either `-1` to turn off throttling or any decimal number like `1.7` +// or `12` to throttle to that level. // API name: requests_per_second func (r *ReindexRethrottle) RequestsPerSecond(requestspersecond string) *ReindexRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/core/reindexrethrottle/response.go b/typedapi/core/reindexrethrottle/response.go index 88e2c1c473..d598b1a3a0 100644 --- a/typedapi/core/reindexrethrottle/response.go +++ b/typedapi/core/reindexrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reindexrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reindexrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.ReindexNode `json:"nodes"` } diff --git a/typedapi/core/rendersearchtemplate/render_search_template.go b/typedapi/core/rendersearchtemplate/render_search_template.go index b50e478131..c368c47d22 100644 --- a/typedapi/core/rendersearchtemplate/render_search_template.go +++ b/typedapi/core/rendersearchtemplate/render_search_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Render a search template. // @@ -39,10 +39,6 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) -const ( - idMask = iota + 1 -) - // ErrBuildPath is returned in case of missing parameters within the build of the request. var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") @@ -85,7 +81,7 @@ func NewRenderSearchTemplateFunc(tp elastictransport.Interface) NewRenderSearchT // // Render a search template as a search request body. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template func New(tp elastictransport.Interface) *RenderSearchTemplate { r := &RenderSearchTemplate{ transport: tp, @@ -93,8 +89,6 @@ func New(tp elastictransport.Interface) *RenderSearchTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -164,19 +158,6 @@ func (r *RenderSearchTemplate) HttpRequest(ctx context.Context) (*http.Request, path.WriteString("/") path.WriteString("template") - method = http.MethodPost - case r.paramSet == idMask: - path.WriteString("/") - path.WriteString("_render") - path.WriteString("/") - path.WriteString("template") - path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "id", r.id) - } - path.WriteString(r.id) - method = http.MethodPost } @@ -315,17 +296,6 @@ func (r *RenderSearchTemplate) Header(key, value string) *RenderSearchTemplate { return r } -// Id ID of the search template to render. -// If no `source` is specified, this or the `id` request body parameter is -// required. -// API Name: id -func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate { - r.paramSet |= idMask - r.id = id - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -372,29 +342,75 @@ func (r *RenderSearchTemplate) Pretty(pretty bool) *RenderSearchTemplate { // API name: file func (r *RenderSearchTemplate) File(file string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.File = &file return r } -// Params Key-value pairs used to replace Mustache variables in the template. 
+// The ID of the search template to render. +// If no `source` is specified, this or the `` request path +// parameter is required. +// If you specify both this parameter and the `` parameter, the API +// uses only ``. +// API name: id +func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Id = &id + + return r +} + +// Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. // API name: params func (r *RenderSearchTemplate) Params(params map[string]json.RawMessage) *RenderSearchTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} +func (r *RenderSearchTemplate) AddParam(key string, value json.RawMessage) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp return r } -// Source An inline search template. -// Supports the same parameters as the search API's request body. +// An inline search template. +// It supports the same parameters as the search API's request body. // These parameters also support Mustache variables. // If no `id` or `` is specified, this parameter is required. 
// API name: source func (r *RenderSearchTemplate) Source(source string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Source = &source diff --git a/typedapi/core/rendersearchtemplate/request.go b/typedapi/core/rendersearchtemplate/request.go index b8f31396e0..929fe00a92 100644 --- a/typedapi/core/rendersearchtemplate/request.go +++ b/typedapi/core/rendersearchtemplate/request.go @@ -16,26 +16,36 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rendersearchtemplate import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // Request holds the request body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L76 type Request struct { File *string `json:"file,omitempty"` + // Id The ID of the search template to render. + // If no `source` is specified, this or the `` request path + // parameter is required. + // If you specify both this parameter and the `` parameter, the API + // uses only ``. + Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. Params map[string]json.RawMessage `json:"params,omitempty"` // Source An inline search template. - // Supports the same parameters as the search API's request body. 
+ // It supports the same parameters as the search API's request body. // These parameters also support Mustache variables. // If no `id` or `` is specified, this parameter is required. Source *string `json:"source,omitempty"` @@ -61,3 +71,59 @@ func (r *Request) FromJSON(data string) (*Request, error) { return &req, nil } + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "File", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.File = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} diff --git a/typedapi/core/rendersearchtemplate/response.go b/typedapi/core/rendersearchtemplate/response.go index 5907b63294..f05157b216 100644 --- a/typedapi/core/rendersearchtemplate/response.go +++ b/typedapi/core/rendersearchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rendersearchtemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 type Response struct { TemplateOutput map[string]json.RawMessage `json:"template_output"` } diff --git a/typedapi/core/scriptspainlessexecute/request.go b/typedapi/core/scriptspainlessexecute/request.go index d494ba0aa0..3967d936fa 100644 --- a/typedapi/core/scriptspainlessexecute/request.go +++ b/typedapi/core/scriptspainlessexecute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package scriptspainlessexecute @@ -25,18 +25,22 @@ import ( "fmt" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/painlesscontext" ) // Request holds the request body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L64 type Request struct { // Context The context that the script should run in. - Context *string `json:"context,omitempty"` + // NOTE: Result ordering in the field contexts is not guaranteed. + Context *painlesscontext.PainlessContext `json:"context,omitempty"` // ContextSetup Additional parameters for the `context`. + // NOTE: This parameter is required for all contexts except `painless_test`, + // which is the default if no value is provided for `context`. ContextSetup *types.PainlessContextSetup `json:"context_setup,omitempty"` - // Script The Painless script to execute. + // Script The Painless script to run. Script *types.Script `json:"script,omitempty"` } diff --git a/typedapi/core/scriptspainlessexecute/response.go b/typedapi/core/scriptspainlessexecute/response.go index f4767191b7..cc30edae03 100644 --- a/typedapi/core/scriptspainlessexecute/response.go +++ b/typedapi/core/scriptspainlessexecute/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package scriptspainlessexecute @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 type Response struct { Result json.RawMessage `json:"result,omitempty"` } diff --git a/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go b/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go index 5adc0383fb..e7e0ed31bc 100644 --- a/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go +++ b/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go @@ -16,10 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a script. +// // Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. 
+// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. package scriptspainlessexecute import ( @@ -36,6 +47,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/painlesscontext" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -75,7 +87,18 @@ func NewScriptsPainlessExecuteFunc(tp elastictransport.Interface) NewScriptsPain } // Run a script. +// // Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. +// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. // // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html func New(tp elastictransport.Interface) *ScriptsPainlessExecute { @@ -85,8 +108,6 @@ func New(tp elastictransport.Interface) *ScriptsPainlessExecute { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,29 +361,42 @@ func (r *ScriptsPainlessExecute) Pretty(pretty bool) *ScriptsPainlessExecute { return r } -// Context The context that the script should run in. +// The context that the script should run in. +// NOTE: Result ordering in the field contexts is not guaranteed. 
// API name: context -func (r *ScriptsPainlessExecute) Context(context string) *ScriptsPainlessExecute { - +func (r *ScriptsPainlessExecute) Context(context painlesscontext.PainlessContext) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Context = &context - return r } -// ContextSetup Additional parameters for the `context`. +// Additional parameters for the `context`. +// NOTE: This parameter is required for all contexts except `painless_test`, +// which is the default if no value is provided for `context`. // API name: context_setup -func (r *ScriptsPainlessExecute) ContextSetup(contextsetup *types.PainlessContextSetup) *ScriptsPainlessExecute { +func (r *ScriptsPainlessExecute) ContextSetup(contextsetup types.PainlessContextSetupVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ContextSetup = contextsetup + r.req.ContextSetup = contextsetup.PainlessContextSetupCaster() return r } -// Script The Painless script to execute. +// The Painless script to run. // API name: script -func (r *ScriptsPainlessExecute) Script(script *types.Script) *ScriptsPainlessExecute { +func (r *ScriptsPainlessExecute) Script(script types.ScriptVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } diff --git a/typedapi/core/scroll/request.go b/typedapi/core/scroll/request.go index 95099314ee..7e9f5d272c 100644 --- a/typedapi/core/scroll/request.go +++ b/typedapi/core/scroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package scroll @@ -32,12 +32,12 @@ import ( // Request holds the request body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/scroll/ScrollRequest.ts#L24-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scroll/ScrollRequest.ts#L24-L88 type Request struct { - // Scroll Period to retain the search context for scrolling. + // Scroll The period to retain the search context for scrolling. Scroll types.Duration `json:"scroll,omitempty"` - // ScrollId Scroll ID of the search. + // ScrollId The scroll ID of the search. ScrollId string `json:"scroll_id"` } diff --git a/typedapi/core/scroll/response.go b/typedapi/core/scroll/response.go index f173c5febb..387638f421 100644 --- a/typedapi/core/scroll/response.go +++ b/typedapi/core/scroll/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package scroll @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/scroll/ScrollResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scroll/ScrollResponse.ts#L22-L24 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. 
+ ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response diff --git a/typedapi/core/scroll/scroll.go b/typedapi/core/scroll/scroll.go index e3fb9cd89e..d964a4799c 100644 --- a/typedapi/core/scroll/scroll.go +++ b/typedapi/core/scroll/scroll.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a scrolling search. // @@ -127,7 +127,7 @@ func NewScrollFunc(tp elastictransport.Interface) NewScroll { // the time of the initial search request. Subsequent indexing or document // changes only affect later search and scroll requests. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll func New(tp elastictransport.Interface) *Scroll { r := &Scroll{ transport: tp, @@ -135,8 +135,6 @@ func New(tp elastictransport.Interface) *Scroll { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -397,17 +395,27 @@ func (r *Scroll) Pretty(pretty bool) *Scroll { return r } -// Scroll Period to retain the search context for scrolling. +// The period to retain the search context for scrolling. // API name: scroll -func (r *Scroll) Scroll(duration types.Duration) *Scroll { - r.req.Scroll = duration +func (r *Scroll) Scroll(duration types.DurationVariant) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Scroll = *duration.DurationCaster() return r } -// ScrollId Scroll ID of the search. +// The scroll ID of the search. // API name: scroll_id func (r *Scroll) ScrollId(scrollid string) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollId = scrollid return r diff --git a/typedapi/core/search/request.go b/typedapi/core/search/request.go index 07800b94d0..77205d9196 100644 --- a/typedapi/core/search/request.go +++ b/typedapi/core/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -33,28 +33,27 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/SearchRequest.ts#L54-L531 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/SearchRequest.ts#L53-L584 type Request struct { // Aggregations Defines the aggregations that are run as part of the search request. Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` // Collapse Collapses search results the values of the specified field. Collapse *types.FieldCollapse `json:"collapse,omitempty"` - // DocvalueFields Array of wildcard (`*`) patterns. + // DocvalueFields An array of wildcard (`*`) field patterns. // The request returns doc values for field names matching these patterns in the // `hits.fields` property of the response. DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` - // Explain If true, returns detailed information about score computation as part of a - // hit. + // Explain If `true`, the request returns detailed information about score computation + // as part of a hit. Explain *bool `json:"explain,omitempty"` // Ext Configuration of search extensions defined by Elasticsearch plugins. Ext map[string]json.RawMessage `json:"ext,omitempty"` - // Fields Array of wildcard (`*`) patterns. + // Fields An array of wildcard (`*`) field patterns. // The request returns values for field names matching these patterns in the // `hits.fields` property of the response. Fields []types.FieldAndFormat `json:"fields,omitempty"` - // From Starting document offset. - // Needs to be non-negative. 
+ // From The starting document offset, which must be non-negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. @@ -62,14 +61,17 @@ type Request struct { // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one // or more fields in your search results. Highlight *types.Highlight `json:"highlight,omitempty"` - // IndicesBoost Boosts the _score of documents from specified indices. + // IndicesBoost Boost the `_score` of documents from specified indices. + // The boost value is the factor by which scores are multiplied. + // A boost value greater than `1.0` increases the score. + // A boost value between `0` and `1.0` decreases the score. IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` - // Knn Defines the approximate kNN search to run. + // Knn The approximate kNN search to run. Knn []types.KnnSearch `json:"knn,omitempty"` - // MinScore Minimum `_score` for matching documents. + // MinScore The minimum `_score` for matching documents. // Documents with a lower `_score` are not included in the search results. MinScore *types.Float64 `json:"min_score,omitempty"` - // Pit Limits the search to a point in time (PIT). + // Pit Limit the search to a point in time (PIT). // If you provide a PIT, you cannot specify an `` in the request path. Pit *types.PointInTimeReference `json:"pit,omitempty"` // PostFilter Use the `post_filter` parameter to filter search results. @@ -81,18 +83,19 @@ type Request struct { // NOTE: This is a debugging tool and adds significant overhead to search // execution. Profile *bool `json:"profile,omitempty"` - // Query Defines the search definition using the Query DSL. + // Query The search definition using the Query DSL. Query *types.Query `json:"query,omitempty"` - // Rank Defines the Reciprocal Rank Fusion (RRF) to use. 
+ // Rank The Reciprocal Rank Fusion (RRF) to use. Rank *types.RankContainer `json:"rank,omitempty"` // Rescore Can be used to improve precision by reordering just the top (for example 100 // - 500) documents returned by the `query` and `post_filter` phases. Rescore []types.Rescore `json:"rescore,omitempty"` // Retriever A retriever is a specification to describe top documents returned from a - // search. A retriever replaces other elements of the search API that also - // return top documents such as query and knn. + // search. + // A retriever replaces other elements of the search API that also return top + // documents such as `query` and `knn`. Retriever *types.RetrieverContainer `json:"retriever,omitempty"` - // RuntimeMappings Defines one or more runtime fields in the search request. + // RuntimeMappings One or more runtime fields in the search request. // These fields take precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. @@ -100,52 +103,57 @@ type Request struct { // SearchAfter Used to retrieve the next page of hits using a set of sort values from the // previous page. SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification - // of each hit. + // SeqNoPrimaryTerm If `true`, the request returns sequence number and primary term of the last + // modification of each hit. SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` - // Size The number of hits to return. + // Size The number of hits to return, which must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. - // To page through more hits, use the `search_after` parameter. + // To page through more hits, use the `search_after` property. 
Size *int `json:"size,omitempty"` - // Slice Can be used to split a scrolled search into multiple slices that can be - // consumed independently. + // Slice Split a scrolled search into multiple slices that can be consumed + // independently. Slice *types.SlicedScroll `json:"slice,omitempty"` // Sort A comma-separated list of : pairs. Sort []types.SortCombinations `json:"sort,omitempty"` - // Source_ Indicates which source fields are returned for matching documents. - // These fields are returned in the hits._source property of the search + // Source_ The source fields that are returned for matching documents. + // These fields are returned in the `hits._source` property of the search // response. + // If the `stored_fields` property is specified, the `_source` property defaults + // to `false`. + // Otherwise, it defaults to `true`. Source_ types.SourceConfig `json:"_source,omitempty"` - // Stats Stats groups to associate with the search. + // Stats The stats groups to associate with the search. // Each group maintains a statistics aggregation for its associated searches. // You can retrieve these stats using the indices stats API. Stats []string `json:"stats,omitempty"` - // StoredFields List of stored fields to return as part of a hit. + // StoredFields A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. - // If this field is specified, the `_source` parameter defaults to `false`. + // If this field is specified, the `_source` property defaults to `false`. // You can pass `_source: true` to return both source fields and stored fields // in the search response. StoredFields []string `json:"stored_fields,omitempty"` // Suggest Defines a suggester that provides similar looking terms based on a provided // text. Suggest *types.Suggester `json:"suggest,omitempty"` - // TerminateAfter Maximum number of documents to collect for each shard. 
+ // TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. - // Use with caution. - // Elasticsearch applies this parameter to each shard handling the request. + // + // IMPORTANT: Use with caution. + // Elasticsearch applies this property to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. - // Avoid specifying this parameter for requests that target data streams with + // Avoid specifying this property for requests that target data streams with // backing indices across multiple data tiers. + // // If set to `0` (default), the query does not terminate early. TerminateAfter *int64 `json:"terminate_after,omitempty"` - // Timeout Specifies the period of time to wait for a response from each shard. + // Timeout The period of time to wait for a response from each shard. // If no response is received before the timeout expires, the request fails and // returns an error. // Defaults to no timeout. Timeout *string `json:"timeout,omitempty"` - // TrackScores If true, calculate and return document scores, even if the scores are not + // TrackScores If `true`, calculate and return document scores, even if the scores are not // used for sorting. TrackScores *bool `json:"track_scores,omitempty"` // TrackTotalHits Number of hits matching the query to count accurately. @@ -154,7 +162,7 @@ type Request struct { // If `false`, the response does not include the total number of hits matching // the query. TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"` - // Version If true, returns document version as part of a hit. + // Version If `true`, the request returns the document version as part of a hit. 
Version *bool `json:"version,omitempty"` } diff --git a/typedapi/core/search/response.go b/typedapi/core/search/response.go index f6c0552ed8..b8c34e2941 100644 --- a/typedapi/core/search/response.go +++ b/typedapi/core/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/SearchResponse.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/SearchResponse.ts#L34-L36 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. 
+ Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response diff --git a/typedapi/core/search/search.go b/typedapi/core/search/search.go index 00be841c9a..242c8f31ec 100644 --- a/typedapi/core/search/search.go +++ b/typedapi/core/search/search.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a search. // @@ -24,6 +24,32 @@ // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. +// +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. +// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. +// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. package search import ( @@ -95,7 +121,33 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { // request body. // If both are specified, only the query parameter is used. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. +// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. +// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search func New(tp elastictransport.Interface) *Search { r := &Search{ transport: tp, @@ -103,8 +155,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -323,8 +373,8 @@ func (r *Search) Header(key, value string) *Search { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). 
+// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -347,8 +397,12 @@ func (r *Search) AllowNoIndices(allownoindices bool) *Search { return r } -// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or shard -// failures. If false, returns an error with no partial results. +// AllowPartialSearchResults If `true` and there are shard request timeouts or shard failures, the request +// returns partial results. +// If `false`, it returns an error with no partial results. +// +// To override the default behavior, you can set the +// `search.default_allow_partial_results` cluster setting to `false`. // API name: allow_partial_search_results func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) @@ -356,8 +410,8 @@ func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Sear return r } -// Analyzer Analyzer to use for the query string. -// This parameter can only be used when the q query string parameter is +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyzer func (r *Search) Analyzer(analyzer string) *Search { @@ -366,8 +420,8 @@ func (r *Search) Analyzer(analyzer string) *Search { return r } -// AnalyzeWildcard If true, wildcard and prefix queries are analyzed. -// This parameter can only be used when the q query string parameter is +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is // specified. 
// API name: analyze_wildcard func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { @@ -378,9 +432,9 @@ func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { // BatchedReduceSize The number of shard results that should be reduced at once on the // coordinating node. -// This value should be used as a protection mechanism to reduce the memory -// overhead per search request if the potential number of shards in the request -// can be large. +// If the potential number of shards in the request can be large, this value +// should be used as a protection mechanism to reduce the memory overhead per +// search request. // API name: batched_reduce_size func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { r.values.Set("batched_reduce_size", batchedreducesize) @@ -388,8 +442,8 @@ func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { return r } -// CcsMinimizeRoundtrips If true, network round-trips between the coordinating node and the remote -// clusters are minimized when executing cross-cluster search (CCS) requests. +// CcsMinimizeRoundtrips If `true`, network round-trips between the coordinating node and the remote +// clusters are minimized when running cross-cluster search (CCS) requests. // API name: ccs_minimize_roundtrips func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) @@ -397,8 +451,8 @@ func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { return r } -// DefaultOperator The default operator for query string query: AND or OR. -// This parameter can only be used when the `q` query string parameter is +// DefaultOperator The default operator for the query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is // specified. 
// API name: default_operator func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { @@ -407,8 +461,9 @@ func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { return r } -// Df Field to use as default where no field prefix is given in the query string. -// This parameter can only be used when the q query string parameter is +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: df func (r *Search) Df(df string) *Search { @@ -417,10 +472,10 @@ func (r *Search) Df(df string) *Search { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values such as `open,hidden`. // API name: expand_wildcards func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { tmp := []string{} @@ -449,10 +504,9 @@ func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { return r } -// IncludeNamedQueriesScore Indicates whether hit.matched_queries should be rendered as a map that -// includes -// the name of the matched query associated with its score (true) -// or as an array containing the name of the matched queries (false) +// IncludeNamedQueriesScore If `true`, the response includes the score contribution from any named +// queries. +// // This functionality reruns each named query on every hit in a search response. // Typically, this adds a small overhead to a request. 
// However, using computationally expensive named queries on a large number of @@ -466,7 +520,7 @@ func (r *Search) IncludeNamedQueriesScore(includenamedqueriesscore bool) *Search // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: lenient func (r *Search) Lenient(lenient bool) *Search { @@ -475,7 +529,7 @@ func (r *Search) Lenient(lenient bool) *Search { return r } -// MaxConcurrentShardRequests Defines the number of concurrent shard requests per node this search executes +// MaxConcurrentShardRequests The number of concurrent shard requests per node that the search runs // concurrently. // This value should be used to limit the impact of the search on the cluster in // order to limit the number of concurrent shard requests. @@ -486,30 +540,24 @@ func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) * return r } -// MinCompatibleShardNode The minimum version of the node that can handle the request -// Any handling node with a lower version will fail the request. -// API name: min_compatible_shard_node -func (r *Search) MinCompatibleShardNode(versionstring string) *Search { - r.values.Set("min_compatible_shard_node", versionstring) - - return r -} - -// Preference Nodes and shards used for the search. +// Preference The nodes and shards used for the search. // By default, Elasticsearch selects from eligible nodes and shards using -// adaptive replica selection, accounting for allocation awareness. 
Valid values -// are: -// `_only_local` to run the search only on shards on the local node; -// `_local` to, if possible, run the search on shards on the local node, or if -// not, select shards using the default method; -// `_only_nodes:,` to run the search on only the specified -// nodes IDs, where, if suitable shards exist on more than one selected node, -// use shards on those nodes using the default method, or if none of the -// specified nodes are available, select shards from any available node using -// the default method; -// `_prefer_nodes:,` to if possible, run the search on the -// specified nodes IDs, or if not, select shards using the default method; -// `_shards:,` to run the search only on the specified shards; +// adaptive replica selection, accounting for allocation awareness. +// Valid values are: +// +// * `_only_local` to run the search only on shards on the local node. +// * `_local` to, if possible, run the search on shards on the local node, or if +// not, select shards using the default method. +// * `_only_nodes:,` to run the search on only the specified +// nodes IDs. If suitable shards exist on more than one selected node, use +// shards on those nodes using the default method. If none of the specified +// nodes are available, select shards from any available node using the default +// method. +// * `_prefer_nodes:,` to if possible, run the search on the +// specified nodes IDs. If not, select shards using the default method. +// `_shards:,` to run the search only on the specified shards. You +// can combine this value with other `preference` values. However, the `_shards` +// value must come first. For example: `_shards:2,3|_local`. // `` (any string that does not start with `_`) to route searches // with the same `` to the same shards in the same order. 
// API name: preference @@ -519,18 +567,19 @@ func (r *Search) Preference(preference string) *Search { return r } -// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search -// shards based on query rewriting if the number of shards the search request -// expands to exceeds the threshold. +// PreFilterShardSize A threshold that enforces a pre-filter roundtrip to prefilter search shards +// based on query rewriting if the number of shards the search request expands +// to exceeds the threshold. // This filter roundtrip can limit the number of shards significantly if for // instance a shard can not match any documents based on its rewrite method (if // date filters are mandatory to match but the shard bounds and the query are // disjoint). // When unspecified, the pre-filter phase is executed if any of these conditions // is met: -// the request targets more than 128 shards; -// the request targets one or more read-only index; -// the primary sort of the query targets an indexed field. +// +// * The request targets more than 128 shards. +// * The request targets one or more read-only index. +// * The primary sort of the query targets an indexed field. // API name: pre_filter_shard_size func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { r.values.Set("pre_filter_shard_size", prefiltershardsize) @@ -540,7 +589,7 @@ func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { // RequestCache If `true`, the caching of search results is enabled for requests where `size` // is `0`. -// Defaults to index level settings. +// It defaults to index level settings. // API name: request_cache func (r *Search) RequestCache(requestcache bool) *Search { r.values.Set("request_cache", strconv.FormatBool(requestcache)) @@ -548,7 +597,7 @@ func (r *Search) RequestCache(requestcache bool) *Search { return r } -// Routing Custom value used to route operations to a specific shard. 
+// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Search) Routing(routing string) *Search { r.values.Set("routing", routing) @@ -556,9 +605,9 @@ func (r *Search) Routing(routing string) *Search { return r } -// Scroll Period to retain the search context for scrolling. See Scroll search results. +// Scroll The period to retain the search context for scrolling. // By default, this value cannot exceed `1d` (24 hours). -// You can change this limit using the `search.max_keep_alive` cluster-level +// You can change this limit by using the `search.max_keep_alive` cluster-level // setting. // API name: scroll func (r *Search) Scroll(duration string) *Search { @@ -567,7 +616,8 @@ func (r *Search) Scroll(duration string) *Search { return r } -// SearchType How distributed term frequencies are calculated for relevance scoring. +// SearchType Indicates how distributed term frequencies are calculated for relevance +// scoring. // API name: search_type func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { r.values.Set("search_type", searchtype.String()) @@ -575,7 +625,7 @@ func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { return r } -// SuggestField Specifies which field to use for suggestions. +// SuggestField The field to use for suggestions. // API name: suggest_field func (r *Search) SuggestField(field string) *Search { r.values.Set("suggest_field", field) @@ -583,8 +633,8 @@ func (r *Search) SuggestField(field string) *Search { return r } -// SuggestMode Specifies the suggest mode. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// SuggestMode The suggest mode. +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. 
// API name: suggest_mode func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { @@ -593,8 +643,8 @@ func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { return r } -// SuggestSize Number of suggestions to return. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// SuggestSize The number of suggestions to return. +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. // API name: suggest_size func (r *Search) SuggestSize(suggestsize string) *Search { @@ -604,7 +654,7 @@ func (r *Search) SuggestSize(suggestsize string) *Search { } // SuggestText The source text for which the suggestions should be returned. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. // API name: suggest_text func (r *Search) SuggestText(suggesttext string) *Search { @@ -654,9 +704,13 @@ func (r *Search) SourceIncludes_(fields ...string) *Search { return r } -// Q Query in the Lucene query string syntax using query parameter search. +// Q A query in the Lucene query string syntax. // Query parameter searches do not support the full Elasticsearch Query DSL but // are handy for testing. +// +// IMPORTANT: This parameter overrides the query parameter in the request body. +// If both parameters are specified, documents matching the query request body +// parameter are not returned. // API name: q func (r *Search) Q(q string) *Search { r.values.Set("q", q) @@ -720,340 +774,568 @@ func (r *Search) Pretty(pretty bool) *Search { return r } -// Aggregations Defines the aggregations that are run as part of the search request. +// Defines the aggregations that are run as part of the search request. 
// API name: aggregations func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// Collapse Collapses search results the values of the specified field. +// Collapses search results the values of the specified field. // API name: collapse -func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { +func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (`*`) patterns. +// An array of wildcard (`*`) field patterns. // The request returns doc values for field names matching these patterns in the // `hits.fields` property of the response. 
// API name: docvalue_fields -func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { - r.req.DocvalueFields = docvaluefields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Explain If true, returns detailed information about score computation as part of a -// hit. +// If `true`, the request returns detailed information about score computation +// as part of a hit. // API name: explain func (r *Search) Explain(explain bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. // API name: ext func (r *Search) Ext(ext map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} +func (r *Search) AddExt(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + + tmp[key] = value + + r.req.Ext = tmp return r } -// Fields Array of wildcard (`*`) patterns. +// An array of wildcard (`*`) field patterns. // The request returns values for field names matching these patterns in the // `hits.fields` property of the response. 
// API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + } return r } -// From Starting document offset. -// Needs to be non-negative. +// The starting document offset, which must be non-negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *Search) From(from int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Highlight Specifies the highlighter to use for retrieving highlighted snippets from one +// Specifies the highlighter to use for retrieving highlighted snippets from one // or more fields in your search results. // API name: highlight -func (r *Search) Highlight(highlight *types.Highlight) *Search { +func (r *Search) Highlight(highlight types.HighlightVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boost the `_score` of documents from specified indices. +// The boost value is the factor by which scores are multiplied. +// A boost value greater than `1.0` increases the score. +// A boost value between `0` and `1.0` decreases the score. 
// API name: indices_boost -func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { - r.req.IndicesBoost = indicesboosts +func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// Knn Defines the approximate kNN search to run. +// The approximate kNN search to run. // API name: knn -func (r *Search) Knn(knns ...types.KnnSearch) *Search { - r.req.Knn = knns +func (r *Search) Knn(knns ...types.KnnSearchVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + r.req.Knn[i] = *v.KnnSearchCaster() + } return r } -// MinScore Minimum `_score` for matching documents. +// The minimum `_score` for matching documents. // Documents with a lower `_score` are not included in the search results. // API name: min_score func (r *Search) MinScore(minscore types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). +// Limit the search to a point in time (PIT). // If you provide a PIT, you cannot specify an `` in the request path. // API name: pit -func (r *Search) Pit(pit *types.PointInTimeReference) *Search { +func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } -// PostFilter Use the `post_filter` parameter to filter search results. +// Use the `post_filter` parameter to filter search results. // The search hits are filtered after the aggregations are calculated. 
// A post filter has no impact on the aggregation results. // API name: post_filter -func (r *Search) PostFilter(postfilter *types.Query) *Search { +func (r *Search) PostFilter(postfilter types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } -// Profile Set to `true` to return detailed timing information about the execution of +// Set to `true` to return detailed timing information about the execution of // individual components in a search request. // NOTE: This is a debugging tool and adds significant overhead to search // execution. // API name: profile func (r *Search) Profile(profile bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. +// The search definition using the Query DSL. // API name: query -func (r *Search) Query(query *types.Query) *Search { +func (r *Search) Query(query types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Rank Defines the Reciprocal Rank Fusion (RRF) to use. +// The Reciprocal Rank Fusion (RRF) to use. 
// API name: rank -func (r *Search) Rank(rank *types.RankContainer) *Search { +func (r *Search) Rank(rank types.RankContainerVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Rank = rank + r.req.Rank = rank.RankContainerCaster() return r } -// Rescore Can be used to improve precision by reordering just the top (for example 100 +// Can be used to improve precision by reordering just the top (for example 100 // - 500) documents returned by the `query` and `post_filter` phases. // API name: rescore -func (r *Search) Rescore(rescores ...types.Rescore) *Search { - r.req.Rescore = rescores +func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// Retriever A retriever is a specification to describe top documents returned from a -// search. A retriever replaces other elements of the search API that also -// return top documents such as query and knn. +// A retriever is a specification to describe top documents returned from a +// search. +// A retriever replaces other elements of the search API that also return top +// documents such as `query` and `knn`. // API name: retriever -func (r *Search) Retriever(retriever *types.RetrieverContainer) *Search { +func (r *Search) Retriever(retriever types.RetrieverContainerVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Retriever = retriever + r.req.Retriever = retriever.RetrieverContainerCaster() return r } -// RuntimeMappings Defines one or more runtime fields in the search request. +// One or more runtime fields in the search request. 
// These fields take precedence over mapped fields with the same name. // API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. // API name: script_fields func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} +func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp return r } -// SearchAfter Used to retrieve the next page of hits using a set of sort values from the +// Used to retrieve the next page of hits using a set of sort values from the // previous page. 
// API name: search_after -func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { - r.req.SearchAfter = sortresults +func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification -// of each hit. +// If `true`, the request returns sequence number and primary term of the last +// modification of each hit. // API name: seq_no_primary_term func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. +// The number of hits to return, which must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. -// To page through more hits, use the `search_after` parameter. +// To page through more hits, use the `search_after` property. // API name: size func (r *Search) Size(size int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Slice Can be used to split a scrolled search into multiple slices that can be -// consumed independently. +// Split a scrolled search into multiple slices that can be consumed +// independently. 
// API name: slice -func (r *Search) Slice(slice *types.SlicedScroll) *Search { +func (r *Search) Slice(slice types.SlicedScrollVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } -// Sort A comma-separated list of : pairs. +// A comma-separated list of : pairs. // API name: sort -func (r *Search) Sort(sorts ...types.SortCombinations) *Search { - r.req.Sort = sorts +func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. -// These fields are returned in the hits._source property of the search +// The source fields that are returned for matching documents. +// These fields are returned in the `hits._source` property of the search // response. +// If the `stored_fields` property is specified, the `_source` property defaults +// to `false`. +// Otherwise, it defaults to `true`. // API name: _source -func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { - r.req.Source_ = sourceconfig +func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. +// The stats groups to associate with the search. // Each group maintains a statistics aggregation for its associated searches. // You can retrieve these stats using the indices stats API. 
// API name: stats func (r *Search) Stats(stats ...string) *Search { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. +// A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. -// If this field is specified, the `_source` parameter defaults to `false`. +// If this field is specified, the `_source` property defaults to `false`. // You can pass `_source: true` to return both source fields and stored fields // in the search response. // API name: stored_fields func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } -// Suggest Defines a suggester that provides similar looking terms based on a provided +// Defines a suggester that provides similar looking terms based on a provided // text. // API name: suggest -func (r *Search) Suggest(suggest *types.Suggester) *Search { +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. -// Use with caution. -// Elasticsearch applies this parameter to each shard handling the request. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this property to each shard handling the request. 
// When possible, let Elasticsearch perform early termination automatically. -// Avoid specifying this parameter for requests that target data streams with +// Avoid specifying this property for requests that target data streams with // backing indices across multiple data tiers. +// // If set to `0` (default), the query does not terminate early. // API name: terminate_after func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. +// The period of time to wait for a response from each shard. // If no response is received before the timeout expires, the request fails and // returns an error. // Defaults to no timeout. // API name: timeout func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If `true`, calculate and return document scores, even if the scores are not // used for sorting. // API name: track_scores func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. +// Number of hits matching the query to count accurately. // If `true`, the exact number of hits is returned at the cost of some // performance. // If `false`, the response does not include the total number of hits matching // the query. 
// API name: track_total_hits -func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { - r.req.TrackTotalHits = trackhits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. +// If `true`, the request returns the document version as part of a hit. // API name: version func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/core/searchmvt/request.go b/typedapi/core/searchmvt/request.go index 8698246c47..d4a7780414 100644 --- a/typedapi/core/searchmvt/request.go +++ b/typedapi/core/searchmvt/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package searchmvt @@ -35,65 +35,80 @@ import ( // Request holds the request body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L373 type Request struct { // Aggs Sub-aggregations for the geotile_grid. 
// - // Supports the following aggregation types: - // - avg - // - cardinality - // - max - // - min - // - sum + // It supports the following aggregation types: + // + // - `avg` + // - `boxplot` + // - `cardinality` + // - `extended stats` + // - `max` + // - `median absolute deviation` + // - `min` + // - `percentile` + // - `percentile-rank` + // - `stats` + // - `sum` + // - `value count` + // + // The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is + // reserved for internal aggregations. Aggs map[string]types.Aggregations `json:"aggs,omitempty"` - // Buffer Size, in pixels, of a clipping buffer outside the tile. This allows renderers + // Buffer The size, in pixels, of a clipping buffer outside the tile. This allows + // renderers // to avoid outline artifacts from geometries that extend past the extent of the // tile. Buffer *int `json:"buffer,omitempty"` - // ExactBounds If false, the meta layer’s feature is the bounding box of the tile. - // If true, the meta layer’s feature is a bounding box resulting from a - // geo_bounds aggregation. The aggregation runs on values that intersect - // the // tile with wrap_longitude set to false. The resulting + // ExactBounds If `false`, the meta layer's feature is the bounding box of the tile. + // If `true`, the meta layer's feature is a bounding box resulting from a + // `geo_bounds` aggregation. The aggregation runs on values that + // intersect + // the `//` tile with `wrap_longitude` set to `false`. The resulting // bounding box may be larger than the vector tile. ExactBounds *bool `json:"exact_bounds,omitempty"` - // Extent Size, in pixels, of a side of the tile. Vector tiles are square with equal - // sides. + // Extent The size, in pixels, of a side of the tile. Vector tiles are square with + // equal sides. Extent *int `json:"extent,omitempty"` - // Fields Fields to return in the `hits` layer. Supports wildcards (`*`). + // Fields The fields to return in the `hits` layer. 
+ // It supports wildcards (`*`). // This parameter does not support fields with array values. Fields with array // values may return inconsistent results. Fields []string `json:"fields,omitempty"` - // GridAgg Aggregation used to create a grid for the `field`. + // GridAgg The aggregation used to create a grid for the `field`. GridAgg *gridaggregationtype.GridAggregationType `json:"grid_agg,omitempty"` // GridPrecision Additional zoom levels available through the aggs layer. For example, if - // is 7 - // and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, - // results - // don’t include the aggs layer. + // `` is `7` + // and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If + // 0, results + // don't include the aggs layer. GridPrecision *int `json:"grid_precision,omitempty"` // GridType Determines the geometry type for features in the aggs layer. In the aggs // layer, - // each feature represents a geotile_grid cell. If 'grid' each feature is a - // Polygon - // of the cells bounding box. If 'point' each feature is a Point that is the + // each feature represents a `geotile_grid` cell. If `grid, each feature is a + // polygon + // of the cells bounding box. If `point`, each feature is a Point that is the // centroid // of the cell. GridType *gridtype.GridType `json:"grid_type,omitempty"` - // Query Query DSL used to filter documents for the search. + // Query The query DSL used to filter documents for the search. Query *types.Query `json:"query,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` - // Size Maximum number of features to return in the hits layer. Accepts 0-10000. - // If 0, results don’t include the hits layer. + // Size The maximum number of features to return in the hits layer. Accepts 0-10000. 
+ // If 0, results don't include the hits layer. Size *int `json:"size,omitempty"` - // Sort Sorts features in the hits layer. By default, the API calculates a bounding - // box for each feature. It sorts features based on this box’s diagonal length, + // Sort Sort the features in the hits layer. By default, the API calculates a + // bounding + // box for each feature. It sorts features based on this box's diagonal length, // from longest to shortest. Sort []types.SortCombinations `json:"sort,omitempty"` - // TrackTotalHits Number of hits matching the query to count accurately. If `true`, the exact - // number + // TrackTotalHits The number of hits matching the query to count accurately. If `true`, the + // exact number // of hits is returned at the cost of some performance. If `false`, the response // does // not include the total number of hits matching the query. @@ -101,6 +116,20 @@ type Request struct { // WithLabels If `true`, the hits and aggs layers will contain additional point features // representing // suggested label positions for the original features. + // + // * `Point` and `MultiPoint` features will have one of the points selected. + // * `Polygon` and `MultiPolygon` features will have a single point generated, + // either the centroid, if it is within the polygon, or another point within the + // polygon selected from the sorted triangle-tree. + // * `LineString` features will likewise provide a roughly central point + // selected from the triangle-tree. + // * The aggregation results will provide one central point for each aggregation + // bucket. + // + // All attributes from the original features will also be copied to the new + // label features. + // In addition, the new features will be distinguishable using the tag + // `_mvt_label_position`. 
WithLabels *bool `json:"with_labels,omitempty"` } diff --git a/typedapi/core/searchmvt/response.go b/typedapi/core/searchmvt/response.go index 7632b06065..ba8cdd9308 100644 --- a/typedapi/core/searchmvt/response.go +++ b/typedapi/core/searchmvt/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package searchmvt // Response holds the response body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L25 type Response = []byte diff --git a/typedapi/core/searchmvt/search_mvt.go b/typedapi/core/searchmvt/search_mvt.go index 905626cd5e..fe2fef914c 100644 --- a/typedapi/core/searchmvt/search_mvt.go +++ b/typedapi/core/searchmvt/search_mvt.go @@ -16,11 +16,177 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Search a vector tile. // // Search a vector tile for geospatial values. +// Before using this API, you should be familiar with the Mapbox vector tile +// specification. +// The API returns results as a binary mapbox vector tile. +// +// Internally, Elasticsearch translates a vector tile search API request into a +// search containing: +// +// * A `geo_bounding_box` query on the ``. The query uses the +// `//` tile as a bounding box. 
+// * A `geotile_grid` or `geohex_grid` aggregation on the ``. The +// `grid_agg` parameter determines the aggregation type. The aggregation uses +// the `//` tile as a bounding box. +// * Optionally, a `geo_bounds` aggregation on the ``. The search only +// includes this aggregation if the `exact_bounds` parameter is `true`. +// * If the optional parameter `with_labels` is `true`, the internal search will +// include a dynamic runtime field that calls the `getLabelPosition` function of +// the geometry doc value. This enables the generation of new point features +// containing suggested geometry labels, so that, for example, multi-polygons +// will have only one label. +// +// For example, Elasticsearch may translate a vector tile search API request +// with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of +// `true` into the following search +// +// ``` +// GET my-index/_search +// +// { +// "size": 10000, +// "query": { +// "geo_bounding_box": { +// "my-geo-field": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "aggregations": { +// "grid": { +// "geotile_grid": { +// "field": "my-geo-field", +// "precision": 11, +// "size": 65536, +// "bounds": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "bounds": { +// "geo_bounds": { +// "field": "my-geo-field", +// "wrap_longitude": false +// } +// } +// } +// } +// +// ``` +// +// The API returns results as a binary Mapbox vector tile. +// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the +// tile contains three layers: +// +// * A `hits` layer containing a feature for each `` value matching the +// `geo_bounding_box` query. +// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or +// `geohex_grid`. 
The layer only contains features for cells with matching data. +// * A meta layer containing: +// - A feature containing a bounding box. By default, this is the bounding box +// +// of the tile. +// - Value ranges for any sub-aggregations on the `geotile_grid` or +// +// `geohex_grid`. +// - Metadata for the search. +// +// The API only returns features that can display at its zoom level. +// For example, if a polygon feature has no area at its zoom level, the API +// omits it. +// The API returns errors as UTF-8 encoded JSON. +// +// IMPORTANT: You can specify several options for this API as either a query +// parameter or request body parameter. +// If you specify both parameters, the query parameter takes precedence. +// +// **Grid precision for geotile** +// +// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles +// for lower zoom levels. +// `grid_precision` represents the additional zoom levels available through +// these cells. The final precision is computed by as follows: ` + +// grid_precision`. +// For example, if `` is 7 and `grid_precision` is 8, then the +// `geotile_grid` aggregation will use a precision of 15. +// The maximum final precision is 29. +// The `grid_precision` also determines the number of cells for the grid as +// follows: `(2^grid_precision) x (2^grid_precision)`. +// For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +// The `aggs` layer only contains features for cells with matching data. +// +// **Grid precision for geohex** +// +// For a `grid_agg` of `geohex`, Elasticsearch uses `` and +// `grid_precision` to calculate a final precision as follows: ` + +// grid_precision`. +// +// This precision determines the H3 resolution of the hexagonal cells produced +// by the `geohex` aggregation. +// The following table maps the H3 resolution for each precision. +// For example, if `` is 3 and `grid_precision` is 3, the precision is 6. 
+// At a precision of 6, hexagonal cells have an H3 resolution of 2. +// If `` is 3 and `grid_precision` is 4, the precision is 7. +// At a precision of 7, hexagonal cells have an H3 resolution of 3. +// +// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +// | --------- | ---------------- | ------------- | ----------------| ----- | +// | 1 | 4 | 0 | 122 | 30.5 | +// | 2 | 16 | 0 | 122 | 7.625 | +// | 3 | 64 | 1 | 842 | 13.15625 | +// | 4 | 256 | 1 | 842 | 3.2890625 | +// | 5 | 1024 | 2 | 5882 | 5.744140625 | +// | 6 | 4096 | 2 | 5882 | 1.436035156 | +// | 7 | 16384 | 3 | 41162 | 2.512329102 | +// | 8 | 65536 | 3 | 41162 | 0.6280822754 | +// | 9 | 262144 | 4 | 288122 | 1.099098206 | +// | 10 | 1048576 | 4 | 288122 | 0.2747745514 | +// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | +// +// Hexagonal cells don't align perfectly on a vector tile. 
+// Some cells may intersect more than one vector tile. +// To compute the H3 resolution for each precision, Elasticsearch compares the +// average density of hexagonal bins at each resolution with the average density +// of tile bins at each zoom level. +// Elasticsearch uses the H3 resolution that is closest to the corresponding +// geotile density. package searchmvt import ( @@ -108,8 +274,174 @@ func NewSearchMvtFunc(tp elastictransport.Interface) NewSearchMvt { // Search a vector tile. // // Search a vector tile for geospatial values. +// Before using this API, you should be familiar with the Mapbox vector tile +// specification. +// The API returns results as a binary mapbox vector tile. +// +// Internally, Elasticsearch translates a vector tile search API request into a +// search containing: // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html +// * A `geo_bounding_box` query on the ``. The query uses the +// `//` tile as a bounding box. +// * A `geotile_grid` or `geohex_grid` aggregation on the ``. The +// `grid_agg` parameter determines the aggregation type. The aggregation uses +// the `//` tile as a bounding box. +// * Optionally, a `geo_bounds` aggregation on the ``. The search only +// includes this aggregation if the `exact_bounds` parameter is `true`. +// * If the optional parameter `with_labels` is `true`, the internal search will +// include a dynamic runtime field that calls the `getLabelPosition` function of +// the geometry doc value. This enables the generation of new point features +// containing suggested geometry labels, so that, for example, multi-polygons +// will have only one label. 
+// +// For example, Elasticsearch may translate a vector tile search API request +// with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of +// `true` into the following search +// +// ``` +// GET my-index/_search +// +// { +// "size": 10000, +// "query": { +// "geo_bounding_box": { +// "my-geo-field": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "aggregations": { +// "grid": { +// "geotile_grid": { +// "field": "my-geo-field", +// "precision": 11, +// "size": 65536, +// "bounds": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "bounds": { +// "geo_bounds": { +// "field": "my-geo-field", +// "wrap_longitude": false +// } +// } +// } +// } +// +// ``` +// +// The API returns results as a binary Mapbox vector tile. +// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the +// tile contains three layers: +// +// * A `hits` layer containing a feature for each `` value matching the +// `geo_bounding_box` query. +// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or +// `geohex_grid`. The layer only contains features for cells with matching data. +// * A meta layer containing: +// - A feature containing a bounding box. By default, this is the bounding box +// +// of the tile. +// - Value ranges for any sub-aggregations on the `geotile_grid` or +// +// `geohex_grid`. +// - Metadata for the search. +// +// The API only returns features that can display at its zoom level. +// For example, if a polygon feature has no area at its zoom level, the API +// omits it. +// The API returns errors as UTF-8 encoded JSON. +// +// IMPORTANT: You can specify several options for this API as either a query +// parameter or request body parameter. 
+// If you specify both parameters, the query parameter takes precedence. +// +// **Grid precision for geotile** +// +// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles +// for lower zoom levels. +// `grid_precision` represents the additional zoom levels available through +// these cells. The final precision is computed by as follows: ` + +// grid_precision`. +// For example, if `` is 7 and `grid_precision` is 8, then the +// `geotile_grid` aggregation will use a precision of 15. +// The maximum final precision is 29. +// The `grid_precision` also determines the number of cells for the grid as +// follows: `(2^grid_precision) x (2^grid_precision)`. +// For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +// The `aggs` layer only contains features for cells with matching data. +// +// **Grid precision for geohex** +// +// For a `grid_agg` of `geohex`, Elasticsearch uses `` and +// `grid_precision` to calculate a final precision as follows: ` + +// grid_precision`. +// +// This precision determines the H3 resolution of the hexagonal cells produced +// by the `geohex` aggregation. +// The following table maps the H3 resolution for each precision. +// For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +// At a precision of 6, hexagonal cells have an H3 resolution of 2. +// If `` is 3 and `grid_precision` is 4, the precision is 7. +// At a precision of 7, hexagonal cells have an H3 resolution of 3. 
+// +// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +// | --------- | ---------------- | ------------- | ----------------| ----- | +// | 1 | 4 | 0 | 122 | 30.5 | +// | 2 | 16 | 0 | 122 | 7.625 | +// | 3 | 64 | 1 | 842 | 13.15625 | +// | 4 | 256 | 1 | 842 | 3.2890625 | +// | 5 | 1024 | 2 | 5882 | 5.744140625 | +// | 6 | 4096 | 2 | 5882 | 1.436035156 | +// | 7 | 16384 | 3 | 41162 | 2.512329102 | +// | 8 | 65536 | 3 | 41162 | 0.6280822754 | +// | 9 | 262144 | 4 | 288122 | 1.099098206 | +// | 10 | 1048576 | 4 | 288122 | 0.2747745514 | +// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | +// +// Hexagonal cells don't align perfectly on a vector tile. +// Some cells may intersect more than one vector tile. 
+// To compute the H3 resolution for each precision, Elasticsearch compares the +// average density of hexagonal bins at each resolution with the average density +// of tile bins at each zoom level. +// Elasticsearch uses the H3 resolution that is closest to the corresponding +// geotile density. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt func New(tp elastictransport.Interface) *SearchMvt { r := &SearchMvt{ transport: tp, @@ -117,8 +449,6 @@ func New(tp elastictransport.Interface) *SearchMvt { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -443,151 +773,263 @@ func (r *SearchMvt) Pretty(pretty bool) *SearchMvt { return r } -// Aggs Sub-aggregations for the geotile_grid. +// Sub-aggregations for the geotile_grid. +// +// It supports the following aggregation types: // -// Supports the following aggregation types: -// - avg -// - cardinality -// - max -// - min -// - sum +// - `avg` +// - `boxplot` +// - `cardinality` +// - `extended stats` +// - `max` +// - `median absolute deviation` +// - `min` +// - `percentile` +// - `percentile-rank` +// - `stats` +// - `sum` +// - `value count` +// +// The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is +// reserved for internal aggregations. 
// API name: aggs func (r *SearchMvt) Aggs(aggs map[string]types.Aggregations) *SearchMvt { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggs = aggs + return r +} + +func (r *SearchMvt) AddAgg(key string, value types.AggregationsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggs == nil { + r.req.Aggs = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggs + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggs = tmp return r } -// Buffer Size, in pixels, of a clipping buffer outside the tile. This allows renderers +// The size, in pixels, of a clipping buffer outside the tile. This allows +// renderers // to avoid outline artifacts from geometries that extend past the extent of the // tile. // API name: buffer func (r *SearchMvt) Buffer(buffer int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Buffer = &buffer return r } -// ExactBounds If false, the meta layer’s feature is the bounding box of the tile. -// If true, the meta layer’s feature is a bounding box resulting from a -// geo_bounds aggregation. The aggregation runs on values that intersect -// the // tile with wrap_longitude set to false. The resulting +// If `false`, the meta layer's feature is the bounding box of the tile. +// If `true`, the meta layer's feature is a bounding box resulting from a +// `geo_bounds` aggregation. The aggregation runs on values that +// intersect +// the `//` tile with `wrap_longitude` set to `false`. The resulting // bounding box may be larger than the vector tile. 
// API name: exact_bounds func (r *SearchMvt) ExactBounds(exactbounds bool) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExactBounds = &exactbounds return r } -// Extent Size, in pixels, of a side of the tile. Vector tiles are square with equal -// sides. +// The size, in pixels, of a side of the tile. Vector tiles are square with +// equal sides. // API name: extent func (r *SearchMvt) Extent(extent int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Extent = &extent return r } -// Fields Fields to return in the `hits` layer. Supports wildcards (`*`). +// The fields to return in the `hits` layer. +// It supports wildcards (`*`). // This parameter does not support fields with array values. Fields with array // values may return inconsistent results. // API name: fields func (r *SearchMvt) Fields(fields ...string) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// GridAgg Aggregation used to create a grid for the `field`. +// The aggregation used to create a grid for the `field`. // API name: grid_agg func (r *SearchMvt) GridAgg(gridagg gridaggregationtype.GridAggregationType) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GridAgg = &gridagg - return r } -// GridPrecision Additional zoom levels available through the aggs layer. For example, if -// is 7 -// and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, -// results -// don’t include the aggs layer. +// Additional zoom levels available through the aggs layer. For example, if +// `` is `7` +// and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If +// 0, results +// don't include the aggs layer. 
// API name: grid_precision func (r *SearchMvt) GridPrecision(gridprecision int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GridPrecision = &gridprecision return r } -// GridType Determines the geometry type for features in the aggs layer. In the aggs +// Determines the geometry type for features in the aggs layer. In the aggs // layer, -// each feature represents a geotile_grid cell. If 'grid' each feature is a -// Polygon -// of the cells bounding box. If 'point' each feature is a Point that is the +// each feature represents a `geotile_grid` cell. If `grid, each feature is a +// polygon +// of the cells bounding box. If `point`, each feature is a Point that is the // centroid // of the cell. // API name: grid_type func (r *SearchMvt) GridType(gridtype gridtype.GridType) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GridType = &gridtype - return r } -// Query Query DSL used to filter documents for the search. +// The query DSL used to filter documents for the search. // API name: query -func (r *SearchMvt) Query(query *types.Query) *SearchMvt { +func (r *SearchMvt) Query(query types.QueryVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. 
// API name: runtime_mappings -func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFields) *SearchMvt { - r.req.RuntimeMappings = runtimefields +func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// Size Maximum number of features to return in the hits layer. Accepts 0-10000. -// If 0, results don’t include the hits layer. +// The maximum number of features to return in the hits layer. Accepts 0-10000. +// If 0, results don't include the hits layer. // API name: size func (r *SearchMvt) Size(size int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Sorts features in the hits layer. By default, the API calculates a bounding -// box for each feature. It sorts features based on this box’s diagonal length, +// Sort the features in the hits layer. By default, the API calculates a +// bounding +// box for each feature. It sorts features based on this box's diagonal length, // from longest to shortest. // API name: sort -func (r *SearchMvt) Sort(sorts ...types.SortCombinations) *SearchMvt { - r.req.Sort = sorts +func (r *SearchMvt) Sort(sorts ...types.SortCombinationsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// TrackTotalHits Number of hits matching the query to count accurately. If `true`, the exact -// number +// The number of hits matching the query to count accurately. If `true`, the +// exact number // of hits is returned at the cost of some performance. 
If `false`, the response // does // not include the total number of hits matching the query. // API name: track_total_hits -func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHits) *SearchMvt { - r.req.TrackTotalHits = trackhits +func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHitsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// WithLabels If `true`, the hits and aggs layers will contain additional point features +// If `true`, the hits and aggs layers will contain additional point features // representing // suggested label positions for the original features. +// +// * `Point` and `MultiPoint` features will have one of the points selected. +// * `Polygon` and `MultiPolygon` features will have a single point generated, +// either the centroid, if it is within the polygon, or another point within the +// polygon selected from the sorted triangle-tree. +// * `LineString` features will likewise provide a roughly central point +// selected from the triangle-tree. +// * The aggregation results will provide one central point for each aggregation +// bucket. +// +// All attributes from the original features will also be copied to the new +// label features. +// In addition, the new features will be distinguishable using the tag +// `_mvt_label_position`. // API name: with_labels func (r *SearchMvt) WithLabels(withlabels bool) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.WithLabels = &withlabels return r diff --git a/typedapi/core/searchshards/response.go b/typedapi/core/searchshards/response.go index 450b84676b..acee52f63f 100644 --- a/typedapi/core/searchshards/response.go +++ b/typedapi/core/searchshards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package searchshards @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package searchshards // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_shards/SearchShardsResponse.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_shards/SearchShardsResponse.ts#L34-L40 type Response struct { Indices map[string]types.ShardStoreIndex `json:"indices"` Nodes map[string]types.SearchShardsNodeAttributes `json:"nodes"` diff --git a/typedapi/core/searchshards/search_shards.go b/typedapi/core/searchshards/search_shards.go index 10005234f7..1f13861d17 100644 --- a/typedapi/core/searchshards/search_shards.go +++ b/typedapi/core/searchshards/search_shards.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the search shards. // // Get the indices and shards that a search request would be run against. // This information can be useful for working out issues or planning // optimizations with routing and shard preferences. -// When filtered aliases are used, the filter is returned as part of the indices -// section. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. 
package searchshards import ( @@ -86,10 +90,14 @@ func NewSearchShardsFunc(tp elastictransport.Interface) NewSearchShards { // Get the indices and shards that a search request would be run against. // This information can be useful for working out issues or planning // optimizations with routing and shard preferences. -// When filtered aliases are used, the filter is returned as part of the indices -// section. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards func New(tp elastictransport.Interface) *SearchShards { r := &SearchShards{ transport: tp, @@ -304,8 +312,10 @@ func (r *SearchShards) Header(key, value string) *SearchShards { return r } -// Index Returns the indices and shards that a search request would be executed -// against. +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index func (r *SearchShards) Index(index string) *SearchShards { r.paramSet |= indexMask @@ -359,8 +369,19 @@ func (r *SearchShards) Local(local bool) *SearchShards { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// IT can also be set to `-1` to indicate that the request should never timeout. 
+// API name: master_timeout +func (r *SearchShards) MasterTimeout(duration string) *SearchShards { + r.values.Set("master_timeout", duration) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *SearchShards) Preference(preference string) *SearchShards { r.values.Set("preference", preference) @@ -368,7 +389,7 @@ func (r *SearchShards) Preference(preference string) *SearchShards { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *SearchShards) Routing(routing string) *SearchShards { r.values.Set("routing", routing) diff --git a/typedapi/core/searchtemplate/request.go b/typedapi/core/searchtemplate/request.go index 244639636b..02450ab56c 100644 --- a/typedapi/core/searchtemplate/request.go +++ b/typedapi/core/searchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package searchtemplate @@ -31,13 +31,15 @@ import ( // Request holds the request body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_template/SearchTemplateRequest.ts#L32-L136 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_template/SearchTemplateRequest.ts#L32-L153 type Request struct { // Explain If `true`, returns detailed information about score calculation as part of // each hit. 
+ // If you specify both this and the `explain` query parameter, the API uses only + // the query parameter. Explain *bool `json:"explain,omitempty"` - // Id ID of the search template to use. If no source is specified, + // Id The ID of the search template to use. If no `source` is specified, // this parameter is required. Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. @@ -47,7 +49,8 @@ type Request struct { // Profile If `true`, the query execution is profiled. Profile *bool `json:"profile,omitempty"` // Source An inline search template. Supports the same parameters as the search API's - // request body. Also supports Mustache variables. If no id is specified, this + // request body. It also supports Mustache variables. If no `id` is specified, + // this // parameter is required. Source *string `json:"source,omitempty"` } diff --git a/typedapi/core/searchtemplate/response.go b/typedapi/core/searchtemplate/response.go index 39394fb660..7404d37f3e 100644 --- a/typedapi/core/searchtemplate/response.go +++ b/typedapi/core/searchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package searchtemplate @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` diff --git a/typedapi/core/searchtemplate/search_template.go b/typedapi/core/searchtemplate/search_template.go index 10322741ac..b8c047b2dc 100644 --- a/typedapi/core/searchtemplate/search_template.go +++ b/typedapi/core/searchtemplate/search_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a search with a search template. package searchtemplate @@ -83,7 +83,7 @@ func NewSearchTemplateFunc(tp elastictransport.Interface) NewSearchTemplate { // Run a search with a search template. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template func New(tp elastictransport.Interface) *SearchTemplate { r := &SearchTemplate{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *SearchTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,8 +313,8 @@ func (r *SearchTemplate) Header(key, value string) *SearchTemplate { return r } -// Index Comma-separated list of data streams, indices, -// and aliases to search. Supports wildcards (*). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // API Name: index func (r *SearchTemplate) Index(index string) *SearchTemplate { r.paramSet |= indexMask @@ -346,7 +344,7 @@ func (r *SearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Sear return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. // Supports comma-separated values, such as `open,hidden`. @@ -380,8 +378,8 @@ func (r *SearchTemplate) IgnoreUnavailable(ignoreunavailable bool) *SearchTempla return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. 
// API name: preference func (r *SearchTemplate) Preference(preference string) *SearchTemplate { r.values.Set("preference", preference) @@ -389,7 +387,7 @@ func (r *SearchTemplate) Preference(preference string) *SearchTemplate { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *SearchTemplate) Routing(routing string) *SearchTemplate { r.values.Set("routing", routing) @@ -414,7 +412,8 @@ func (r *SearchTemplate) SearchType(searchtype searchtype.SearchType) *SearchTem return r } -// RestTotalHitsAsInt If true, hits.total are rendered as an integer in the response. +// RestTotalHitsAsInt If `true`, `hits.total` is rendered as an integer in the response. +// If `false`, it is rendered as an object. // API name: rest_total_hits_as_int func (r *SearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *SearchTemplate { r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) @@ -475,48 +474,91 @@ func (r *SearchTemplate) Pretty(pretty bool) *SearchTemplate { return r } -// Explain If `true`, returns detailed information about score calculation as part of +// If `true`, returns detailed information about score calculation as part of // each hit. +// If you specify both this and the `explain` query parameter, the API uses only +// the query parameter. // API name: explain func (r *SearchTemplate) Explain(explain bool) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Id ID of the search template to use. If no source is specified, +// The ID of the search template to use. If no `source` is specified, // this parameter is required. 
// API name: id func (r *SearchTemplate) Id(id string) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = &id return r } -// Params Key-value pairs used to replace Mustache variables in the template. +// Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. // API name: params func (r *SearchTemplate) Params(params map[string]json.RawMessage) *SearchTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} +func (r *SearchTemplate) AddParam(key string, value json.RawMessage) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp return r } -// Profile If `true`, the query execution is profiled. +// If `true`, the query execution is profiled. // API name: profile func (r *SearchTemplate) Profile(profile bool) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Source An inline search template. Supports the same parameters as the search API's -// request body. Also supports Mustache variables. If no id is specified, this +// An inline search template. Supports the same parameters as the search API's +// request body. It also supports Mustache variables. If no `id` is specified, +// this // parameter is required. 
// API name: source func (r *SearchTemplate) Source(source string) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Source = &source diff --git a/typedapi/core/termsenum/request.go b/typedapi/core/termsenum/request.go index ec0fcdc00c..41b8d0ec91 100644 --- a/typedapi/core/termsenum/request.go +++ b/typedapi/core/termsenum/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package termsenum @@ -33,27 +33,33 @@ import ( // Request holds the request body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L93 type Request struct { - // CaseInsensitive When true the provided search string is matched against index terms without - // case sensitivity. + // CaseInsensitive When `true`, the provided search string is matched against index terms + // without case sensitivity. CaseInsensitive *bool `json:"case_insensitive,omitempty"` // Field The string to match at the start of indexed terms. If not provided, all terms // in the field are considered. Field string `json:"field"` - // IndexFilter Allows to filter an index shard if the provided query rewrites to match_none. + // IndexFilter Filter an index shard if the provided query rewrites to `match_none`. IndexFilter *types.Query `json:"index_filter,omitempty"` - SearchAfter *string `json:"search_after,omitempty"` - // Size How many matching terms to return. 
+ // SearchAfter The string after which terms in the index should be returned. + // It allows for a form of pagination if the last result from one request is + // passed as the `search_after` parameter for a subsequent request. + SearchAfter *string `json:"search_after,omitempty"` + // Size The number of matching terms to return. Size *int `json:"size,omitempty"` - // String The string after which terms in the index should be returned. Allows for a - // form of pagination if the last result from one request is passed as the - // search_after parameter for a subsequent request. + // String The string to match at the start of indexed terms. + // If it is not provided, all terms in the field are considered. + // + // > info + // > The prefix string cannot be larger than the largest possible keyword value, + // which is Lucene's term byte-length limit of 32766. String *string `json:"string,omitempty"` - // Timeout The maximum length of time to spend collecting results. Defaults to "1s" (one - // second). If the timeout is exceeded the complete flag set to false in the - // response and the results may be partial or empty. + // Timeout The maximum length of time to spend collecting results. + // If the timeout is exceeded the `complete` flag set to `false` in the response + // and the results may be partial or empty. Timeout types.Duration `json:"timeout,omitempty"` } diff --git a/typedapi/core/termsenum/response.go b/typedapi/core/termsenum/response.go index ee01b35faf..3a857df926 100644 --- a/typedapi/core/termsenum/response.go +++ b/typedapi/core/termsenum/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package termsenum @@ -26,8 +26,13 @@ import ( // Response holds the response body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L32 type Response struct { + + // Complete If `false`, the returned terms set may be incomplete and should be treated as + // approximate. + // This can occur due to a few reasons, such as a request timeout or a node + // error. Complete bool `json:"complete"` Shards_ types.ShardStatistics `json:"_shards"` Terms []string `json:"terms"` diff --git a/typedapi/core/termsenum/terms_enum.go b/typedapi/core/termsenum/terms_enum.go index 2e7e95b6de..1bd6b6b351 100644 --- a/typedapi/core/termsenum/terms_enum.go +++ b/typedapi/core/termsenum/terms_enum.go @@ -16,20 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get terms in an index. // // Discover terms that match a partial string in an index. -// This "terms enum" API is designed for low-latency look-ups used in -// auto-complete scenarios. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. // -// If the `complete` property in the response is false, the returned terms set -// may be incomplete and should be treated as approximate. 
-// This can occur due to a few reasons, such as a request timeout or a node -// error. -// -// NOTE: The terms enum API may return terms from deleted documents. Deleted +// > info +// > The terms enum API may return terms from deleted documents. Deleted // documents are initially only marked as deleted. It is not until their // segments are merged that documents are actually deleted. Until that happens, // the terms enum API will return terms from these documents. @@ -98,20 +94,16 @@ func NewTermsEnumFunc(tp elastictransport.Interface) NewTermsEnum { // Get terms in an index. // // Discover terms that match a partial string in an index. -// This "terms enum" API is designed for low-latency look-ups used in -// auto-complete scenarios. -// -// If the `complete` property in the response is false, the returned terms set -// may be incomplete and should be treated as approximate. -// This can occur due to a few reasons, such as a request timeout or a node -// error. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. // -// NOTE: The terms enum API may return terms from deleted documents. Deleted +// > info +// > The terms enum API may return terms from deleted documents. Deleted // documents are initially only marked as deleted. It is not until their // segments are merged that documents are actually deleted. Until that happens, // the terms enum API will return terms from these documents. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum func New(tp elastictransport.Interface) *TermsEnum { r := &TermsEnum{ transport: tp, @@ -119,8 +111,6 @@ func New(tp elastictransport.Interface) *TermsEnum { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -332,8 +322,10 @@ func (r *TermsEnum) Header(key, value string) *TermsEnum { return r } -// Index Comma-separated list of data streams, indices, and index aliases to search. -// Wildcard (*) expressions are supported. +// Index A comma-separated list of data streams, indices, and index aliases to search. +// Wildcard (`*`) expressions are supported. +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. // API Name: index func (r *TermsEnum) _index(index string) *TermsEnum { r.paramSet |= indexMask @@ -386,66 +378,104 @@ func (r *TermsEnum) Pretty(pretty bool) *TermsEnum { return r } -// CaseInsensitive When true the provided search string is matched against index terms without -// case sensitivity. +// When `true`, the provided search string is matched against index terms +// without case sensitivity. // API name: case_insensitive func (r *TermsEnum) CaseInsensitive(caseinsensitive bool) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CaseInsensitive = &caseinsensitive return r } -// Field The string to match at the start of indexed terms. If not provided, all terms +// The string to match at the start of indexed terms. If not provided, all terms // in the field are considered. 
// API name: field func (r *TermsEnum) Field(field string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Field = field return r } -// IndexFilter Allows to filter an index shard if the provided query rewrites to match_none. +// Filter an index shard if the provided query rewrites to `match_none`. // API name: index_filter -func (r *TermsEnum) IndexFilter(indexfilter *types.Query) *TermsEnum { +func (r *TermsEnum) IndexFilter(indexfilter types.QueryVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexFilter = indexfilter + r.req.IndexFilter = indexfilter.QueryCaster() return r } +// The string after which terms in the index should be returned. +// It allows for a form of pagination if the last result from one request is +// passed as the `search_after` parameter for a subsequent request. // API name: search_after func (r *TermsEnum) SearchAfter(searchafter string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.SearchAfter = &searchafter return r } -// Size How many matching terms to return. +// The number of matching terms to return. // API name: size func (r *TermsEnum) Size(size int) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// String The string after which terms in the index should be returned. Allows for a -// form of pagination if the last result from one request is passed as the -// search_after parameter for a subsequent request. +// The string to match at the start of indexed terms. +// If it is not provided, all terms in the field are considered. +// +// > info +// > The prefix string cannot be larger than the largest possible keyword value, +// which is Lucene's term byte-length limit of 32766. 
// API name: string func (r *TermsEnum) String(string string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.String = &string return r } -// Timeout The maximum length of time to spend collecting results. Defaults to "1s" (one -// second). If the timeout is exceeded the complete flag set to false in the -// response and the results may be partial or empty. +// The maximum length of time to spend collecting results. +// If the timeout is exceeded the `complete` flag set to `false` in the response +// and the results may be partial or empty. // API name: timeout -func (r *TermsEnum) Timeout(duration types.Duration) *TermsEnum { - r.req.Timeout = duration +func (r *TermsEnum) Timeout(duration types.DurationVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/core/termvectors/request.go b/typedapi/core/termvectors/request.go index 3ef6eb919e..e4e1e499aa 100644 --- a/typedapi/core/termvectors/request.go +++ b/typedapi/core/termvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package termvectors @@ -29,15 +29,23 @@ import ( // Request holds the request body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/TermVectorsRequest.ts#L33-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/TermVectorsRequest.ts#L33-L187 type Request struct { // Doc An artificial document (a document not present in the index) for which you // want to retrieve term vectors. Doc json.RawMessage `json:"doc,omitempty"` // Filter Filter terms based on their tf-idf scores. + // This could be useful in order find out a good characteristic vector of a + // document. + // This feature works in a similar manner to the second phase of the More Like + // This Query. Filter *types.TermVectorsFilter `json:"filter,omitempty"` - // PerFieldAnalyzer Overrides the default per-field analyzer. + // PerFieldAnalyzer Override the default per-field analyzer. + // This is useful in order to generate term vectors in any fashion, especially + // when using artificial documents. + // When providing an analyzer for a field that already stores term vectors, the + // term vectors will be regenerated. PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` } diff --git a/typedapi/core/termvectors/response.go b/typedapi/core/termvectors/response.go index d2a3c4eb47..39468a1643 100644 --- a/typedapi/core/termvectors/response.go +++ b/typedapi/core/termvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package termvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 type Response struct { Found bool `json:"found"` Id_ *string `json:"_id,omitempty"` diff --git a/typedapi/core/termvectors/termvectors.go b/typedapi/core/termvectors/termvectors.go index abc40d7767..404cb4723a 100644 --- a/typedapi/core/termvectors/termvectors.go +++ b/typedapi/core/termvectors/termvectors.go @@ -16,12 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get term vector information. // // Get information and statistics about terms in the fields of a particular // document. +// +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. 
+// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. +// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. +// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. package termvectors import ( @@ -93,7 +143,57 @@ func NewTermvectorsFunc(tp elastictransport.Interface) NewTermvectors { // Get information and statistics about terms in the fields of a particular // document. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. +// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. +// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. 
+// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors func New(tp elastictransport.Interface) *Termvectors { r := &Termvectors{ transport: tp, @@ -101,8 +201,6 @@ func New(tp elastictransport.Interface) *Termvectors { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -331,7 +429,7 @@ func (r *Termvectors) Header(key, value string) *Termvectors { return r } -// Index Name of the index that contains the document. +// Index The name of the index that contains the document. // API Name: index func (r *Termvectors) _index(index string) *Termvectors { r.paramSet |= indexMask @@ -340,7 +438,7 @@ func (r *Termvectors) _index(index string) *Termvectors { return r } -// Id Unique identifier of the document. +// Id A unique identifier for the document. // API Name: id func (r *Termvectors) Id(id string) *Termvectors { r.paramSet |= idMask @@ -349,10 +447,10 @@ func (r *Termvectors) Id(id string) *Termvectors { return r } -// Fields Comma-separated list or wildcard expressions of fields to include in the +// Fields A comma-separated list or wildcard expressions of fields to include in the // statistics. -// Used as the default list unless a specific field list is provided in the -// `completion_fields` or `fielddata_fields` parameters. +// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. 
// API name: fields func (r *Termvectors) Fields(fields ...string) *Termvectors { r.values.Set("fields", strings.Join(fields, ",")) @@ -360,8 +458,13 @@ func (r *Termvectors) Fields(fields ...string) *Termvectors { return r } -// FieldStatistics If `true`, the response includes the document count, sum of document -// frequencies, and sum of total term frequencies. +// FieldStatistics If `true`, the response includes: +// +// * The document count (how many documents contain this field). +// * The sum of document frequencies (the sum of document frequencies for all +// terms in this field). +// * The sum of total term frequencies (the sum of total term frequencies of +// each term in this field). // API name: field_statistics func (r *Termvectors) FieldStatistics(fieldstatistics bool) *Termvectors { r.values.Set("field_statistics", strconv.FormatBool(fieldstatistics)) @@ -393,8 +496,8 @@ func (r *Termvectors) Positions(positions bool) *Termvectors { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *Termvectors) Preference(preference string) *Termvectors { r.values.Set("preference", preference) @@ -410,7 +513,7 @@ func (r *Termvectors) Realtime(realtime bool) *Termvectors { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Termvectors) Routing(routing string) *Termvectors { r.values.Set("routing", routing) @@ -418,7 +521,14 @@ func (r *Termvectors) Routing(routing string) *Termvectors { return r } -// TermStatistics If `true`, the response includes term frequency and document frequency. +// TermStatistics If `true`, the response includes: +// +// * The total term frequency (how often a term occurs in all documents). 
+// * The document frequency (the number of documents containing the current +// term). +// +// By default these values are not returned since term statistics can have a +// serious performance impact. // API name: term_statistics func (r *Termvectors) TermStatistics(termstatistics bool) *Termvectors { r.values.Set("term_statistics", strconv.FormatBool(termstatistics)) @@ -434,7 +544,7 @@ func (r *Termvectors) Version(versionnumber string) *Termvectors { return r } -// VersionType Specific version type. +// VersionType The version type. // API name: version_type func (r *Termvectors) VersionType(versiontype versiontype.VersionType) *Termvectors { r.values.Set("version_type", versiontype.String()) @@ -486,14 +596,14 @@ func (r *Termvectors) Pretty(pretty bool) *Termvectors { return r } -// Doc An artificial document (a document not present in the index) for which you +// An artificial document (a document not present in the index) for which you // want to retrieve term vectors. // API name: doc -// -// doc should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *Termvectors) Doc(doc any) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := doc.(type) { case json.RawMessage: r.req.Doc = casted @@ -507,24 +617,56 @@ func (r *Termvectors) Doc(doc any) *Termvectors { return nil }) } - return r } -// Filter Filter terms based on their tf-idf scores. +// Filter terms based on their tf-idf scores. +// This could be useful in order find out a good characteristic vector of a +// document. +// This feature works in a similar manner to the second phase of the More Like +// This Query. 
// API name: filter -func (r *Termvectors) Filter(filter *types.TermVectorsFilter) *Termvectors { +func (r *Termvectors) Filter(filter types.TermVectorsFilterVariant) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.TermVectorsFilterCaster() return r } -// PerFieldAnalyzer Overrides the default per-field analyzer. +// Override the default per-field analyzer. +// This is useful in order to generate term vectors in any fashion, especially +// when using artificial documents. +// When providing an analyzer for a field that already stores term vectors, the +// term vectors will be regenerated. // API name: per_field_analyzer func (r *Termvectors) PerFieldAnalyzer(perfieldanalyzer map[string]string) *Termvectors { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PerFieldAnalyzer = perfieldanalyzer + return r +} + +func (r *Termvectors) AddPerFieldAnalyzer(key string, value string) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]string + if r.req.PerFieldAnalyzer == nil { + r.req.PerFieldAnalyzer = make(map[string]string) + } else { + tmp = r.req.PerFieldAnalyzer + } + + tmp[key] = value + r.req.PerFieldAnalyzer = tmp return r } diff --git a/typedapi/core/update/request.go b/typedapi/core/update/request.go index a481004264..609ce3b7c2 100644 --- a/typedapi/core/update/request.go +++ b/typedapi/core/update/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package update @@ -33,27 +33,29 @@ import ( // Request holds the request body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update/UpdateRequest.ts#L38-L154 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update/UpdateRequest.ts#L38-L194 type Request struct { - // DetectNoop Set to false to disable setting 'result' in the response - // to 'noop' if no change to the document occurred. + // DetectNoop If `true`, the `result` in the response is set to `noop` (no operation) when + // there are no changes to the document. DetectNoop *bool `json:"detect_noop,omitempty"` // Doc A partial update to an existing document. + // If both `doc` and `script` are specified, `doc` is ignored. Doc json.RawMessage `json:"doc,omitempty"` - // DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' + // DocAsUpsert If `true`, use the contents of 'doc' as the value of 'upsert'. + // NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` - // Script Script to execute to update the document. + // Script The script to run to update the document. Script *types.Script `json:"script,omitempty"` - // ScriptedUpsert Set to true to execute the script whether or not the document exists. + // ScriptedUpsert If `true`, run the script whether or not the document exists. ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` - // Source_ Set to false to disable source retrieval. You can also specify a - // comma-separated - // list of the fields you want to retrieve. + // Source_ If `false`, turn off source retrieval. 
+ // You can also specify a comma-separated list of the fields you want to + // retrieve. Source_ types.SourceConfig `json:"_source,omitempty"` // Upsert If the document does not already exist, the contents of 'upsert' are inserted - // as a - // new document. If the document exists, the 'script' is executed. + // as a new document. + // If the document exists, the 'script' is run. Upsert json.RawMessage `json:"upsert,omitempty"` } diff --git a/typedapi/core/update/response.go b/typedapi/core/update/response.go index 908cd9ba50..52ceedaf7f 100644 --- a/typedapi/core/update/response.go +++ b/typedapi/core/update/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package update @@ -27,17 +27,26 @@ import ( // Response holds the response body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update/UpdateResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update/UpdateResponse.ts#L27-L29 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Get *types.InlineGet `json:"get,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *types.InlineGet `json:"get,omitempty"` + // Id_ The unique identifier for the added document. 
+ Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/update/update.go b/typedapi/core/update/update.go index 00c94e86ed..85b4f392a3 100644 --- a/typedapi/core/update/update.go +++ b/typedapi/core/update/update.go @@ -16,10 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a document. -// Updates a document by running a script or passing a partial document. +// +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. 
+// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. +// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). package update import ( @@ -89,9 +112,32 @@ func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { } // Update a document. -// Updates a document by running a script or passing a partial document. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. +// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. +// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update func New(tp elastictransport.Interface) *Update { r := &Update{ transport: tp, @@ -99,8 +145,6 @@ func New(tp elastictransport.Interface) *Update { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -318,7 +362,7 @@ func (r *Update) Header(key, value string) *Update { return r } -// Id Document ID +// Id A unique identifier for the document to be updated. // API Name: id func (r *Update) _id(id string) *Update { r.paramSet |= idMask @@ -327,7 +371,8 @@ func (r *Update) _id(id string) *Update { return r } -// Index The name of the index +// Index The name of the target index. +// By default, the index is created automatically if it doesn't exist. // API Name: index func (r *Update) _index(index string) *Update { r.paramSet |= indexMask @@ -352,6 +397,15 @@ func (r *Update) IfSeqNo(sequencenumber string) *Update { return r } +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Update) IncludeSourceOnError(includesourceonerror bool) *Update { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + // Lang The script language. // API name: lang func (r *Update) Lang(lang string) *Update { @@ -361,9 +415,10 @@ func (r *Update) Lang(lang string) *Update { } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', it does nothing with refreshes. 
// API name: refresh func (r *Update) Refresh(refresh refresh.Refresh) *Update { r.values.Set("refresh", refresh.String()) @@ -371,7 +426,7 @@ func (r *Update) Refresh(refresh refresh.Refresh) *Update { return r } -// RequireAlias If true, the destination must be an index alias. +// RequireAlias If `true`, the destination must be an index alias. // API name: require_alias func (r *Update) RequireAlias(requirealias bool) *Update { r.values.Set("require_alias", strconv.FormatBool(requirealias)) @@ -379,8 +434,7 @@ func (r *Update) RequireAlias(requirealias bool) *Update { return r } -// RetryOnConflict Specify how many times should the operation be retried when a conflict -// occurs. +// RetryOnConflict The number of times the operation should be retried when a conflict occurs. // API name: retry_on_conflict func (r *Update) RetryOnConflict(retryonconflict int) *Update { r.values.Set("retry_on_conflict", strconv.Itoa(retryonconflict)) @@ -388,7 +442,7 @@ func (r *Update) RetryOnConflict(retryonconflict int) *Update { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Update) Routing(routing string) *Update { r.values.Set("routing", routing) @@ -396,8 +450,9 @@ func (r *Update) Routing(routing string) *Update { return r } -// Timeout Period to wait for dynamic mapping updates and active shards. -// This guarantees Elasticsearch waits for at least the timeout before failing. +// Timeout The period to wait for the following operations: dynamic mapping updates and +// waiting for active shards. +// Elasticsearch waits for at least the timeout period before failing. // The actual wait time could be longer, particularly when multiple waits occur. 
// API name: timeout func (r *Update) Timeout(duration string) *Update { @@ -406,11 +461,11 @@ func (r *Update) Timeout(duration string) *Update { return r } -// WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operations. +// WaitForActiveShards The number of copies of each shard that must be active before proceeding with +// the operation. // Set to 'all' or any positive integer up to the total number of shards in the -// index -// (number_of_replicas+1). Defaults to 1 meaning the primary shard. +// index (`number_of_replicas`+1). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -418,7 +473,7 @@ func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { return r } -// SourceExcludes_ Specify the source fields you want to exclude. +// SourceExcludes_ The source fields you want to exclude. // API name: _source_excludes func (r *Update) SourceExcludes_(fields ...string) *Update { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -426,7 +481,7 @@ func (r *Update) SourceExcludes_(fields ...string) *Update { return r } -// SourceIncludes_ Specify the source fields you want to retrieve. +// SourceIncludes_ The source fields you want to retrieve. // API name: _source_includes func (r *Update) SourceIncludes_(fields ...string) *Update { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -478,22 +533,28 @@ func (r *Update) Pretty(pretty bool) *Update { return r } -// DetectNoop Set to false to disable setting 'result' in the response -// to 'noop' if no change to the document occurred. +// If `true`, the `result` in the response is set to `noop` (no operation) when +// there are no changes to the document. 
// API name: detect_noop func (r *Update) DetectNoop(detectnoop bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DetectNoop = &detectnoop return r } -// Doc A partial update to an existing document. +// A partial update to an existing document. +// If both `doc` and `script` are specified, `doc` is ignored. // API name: doc -// -// doc should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *Update) Doc(doc any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := doc.(type) { case json.RawMessage: r.req.Doc = casted @@ -507,54 +568,73 @@ func (r *Update) Doc(doc any) *Update { return nil }) } - return r } -// DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' +// If `true`, use the contents of 'doc' as the value of 'upsert'. +// NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. // API name: doc_as_upsert func (r *Update) DocAsUpsert(docasupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DocAsUpsert = &docasupsert return r } -// Script Script to execute to update the document. +// The script to run to update the document. // API name: script -func (r *Update) Script(script *types.Script) *Update { +func (r *Update) Script(script types.ScriptVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } -// ScriptedUpsert Set to true to execute the script whether or not the document exists. +// If `true`, run the script whether or not the document exists. 
// API name: scripted_upsert func (r *Update) ScriptedUpsert(scriptedupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScriptedUpsert = &scriptedupsert return r } -// Source_ Set to false to disable source retrieval. You can also specify a -// comma-separated -// list of the fields you want to retrieve. +// If `false`, turn off source retrieval. +// You can also specify a comma-separated list of the fields you want to +// retrieve. // API name: _source -func (r *Update) Source_(sourceconfig types.SourceConfig) *Update { - r.req.Source_ = sourceconfig +func (r *Update) Source_(sourceconfig types.SourceConfigVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Upsert If the document does not already exist, the contents of 'upsert' are inserted -// as a -// new document. If the document exists, the 'script' is executed. +// If the document does not already exist, the contents of 'upsert' are inserted +// as a new document. +// If the document exists, the 'script' is run. // API name: upsert -// -// upsert should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. 
func (r *Update) Upsert(upsert any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := upsert.(type) { case json.RawMessage: r.req.Upsert = casted @@ -568,6 +648,5 @@ func (r *Update) Upsert(upsert any) *Update { return nil }) } - return r } diff --git a/typedapi/core/updatebyquery/request.go b/typedapi/core/updatebyquery/request.go index f44b668e42..2018f200ab 100644 --- a/typedapi/core/updatebyquery/request.go +++ b/typedapi/core/updatebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatebyquery @@ -30,14 +30,15 @@ import ( // Request holds the request body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L227 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L336 type Request struct { - // Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. + // Conflicts The preferred behavior when update by query hits version conflicts: `abort` + // or `proceed`. Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` // MaxDocs The maximum number of documents to update. MaxDocs *int64 `json:"max_docs,omitempty"` - // Query Specifies the documents to update using the Query DSL. + // Query The documents to update using the Query DSL. Query *types.Query `json:"query,omitempty"` // Script The script to run to update the document source or metadata when updating. 
Script *types.Script `json:"script,omitempty"` diff --git a/typedapi/core/updatebyquery/response.go b/typedapi/core/updatebyquery/response.go index 9d7f2f0859..98b79162e6 100644 --- a/typedapi/core/updatebyquery/response.go +++ b/typedapi/core/updatebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatebyquery @@ -26,24 +26,51 @@ import ( // Response holds the response body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L67 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - Task types.TaskId `json:"task,omitempty"` - Throttled types.Duration `json:"throttled,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntil types.Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - Updated *int64 `json:"updated,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses pulled back 
by the update by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures Array of failures if there were any unrecoverable errors during the process. + // If this is non-empty then the request ended because of those failures. + // Update by query is implemented using batches. + // Any failure causes the entire process to end, but all failures in the current + // batch are collected into the array. + // You can use the `conflicts` option to prevent reindex from ending when + // version conflicts occur. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // update by query returned a noop value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the update by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by update by query. + // `bulk` is the number of bulk actions retried. + // `search` is the number of search actions retried. + Retries *types.Retries `json:"retries,omitempty"` + Task types.TaskId `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in an _update_by_query response. + // It only has meaning when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. 
+ ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If true, some requests timed out during the update by query. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that the update by query hit. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/updatebyquery/update_by_query.go b/typedapi/core/updatebyquery/update_by_query.go index 2c41e2668f..3f1c1e4d98 100644 --- a/typedapi/core/updatebyquery/update_by_query.go +++ b/typedapi/core/updatebyquery/update_by_query.go @@ -16,13 +16,145 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update documents. // Updates documents that match the specified query. // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. 
+// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. +// A bulk update request is performed for each batch of matching documents. +// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. 
+// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. 
All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. +// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Update the document source** +// +// Update by query supports scripts to update the document source. +// As with the update API, you can set `ctx.op` to change the operation that is +// performed. +// +// Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +// changes. +// The update by query operation skips updating the document and increments the +// `noop` counter. +// +// Set `ctx.op = "delete"` if your script decides that the document should be +// deleted. +// The update by query operation deletes the document and increments the +// `deleted` counter. +// +// Update by query supports only `index`, `noop`, and `delete`. 
+// Setting `ctx.op` to anything else is an error. +// Setting any other field in `ctx` is an error. +// This API enables you to only modify the source of matching documents; you +// cannot move them. package updatebyquery import ( @@ -95,7 +227,139 @@ func NewUpdateByQueryFunc(tp elastictransport.Interface) NewUpdateByQuery { // stream or index without modifying the source, which is useful for picking up // mapping changes. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. 
+// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. +// A bulk update request is performed for each batch of matching documents. +// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. 
+// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. 
Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. +// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Update the document source** +// +// Update by query supports scripts to update the document source. +// As with the update API, you can set `ctx.op` to change the operation that is +// performed. +// +// Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +// changes. +// The update by query operation skips updating the document and increments the +// `noop` counter. +// +// Set `ctx.op = "delete"` if your script decides that the document should be +// deleted. +// The update by query operation deletes the document and increments the +// `deleted` counter. +// +// Update by query supports only `index`, `noop`, and `delete`. +// Setting `ctx.op` to anything else is an error. +// Setting any other field in `ctx` is an error. +// This API enables you to only modify the source of matching documents; you +// cannot move them. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query func New(tp elastictransport.Interface) *UpdateByQuery { r := &UpdateByQuery{ transport: tp, @@ -103,8 +367,6 @@ func New(tp elastictransport.Interface) *UpdateByQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -316,8 +578,8 @@ func (r *UpdateByQuery) Header(key, value string) *UpdateByQuery { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). 
// To search all data streams or indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -340,7 +602,9 @@ func (r *UpdateByQuery) AllowNoIndices(allownoindices bool) *UpdateByQuery { return r } -// Analyzer Analyzer to use for the query string. +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyzer func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery { r.values.Set("analyzer", analyzer) @@ -349,6 +613,8 @@ func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyze_wildcard func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -357,6 +623,8 @@ func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery { } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: default_operator func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *UpdateByQuery { r.values.Set("default_operator", defaultoperator.String()) @@ -364,7 +632,10 @@ func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *Upda return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. 
// API name: df func (r *UpdateByQuery) Df(df string) *UpdateByQuery { r.values.Set("df", df) @@ -372,10 +643,10 @@ func (r *UpdateByQuery) Df(df string) *UpdateByQuery { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values, such as `open,hidden`. // Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards func (r *UpdateByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateByQuery { @@ -407,6 +678,8 @@ func (r *UpdateByQuery) IgnoreUnavailable(ignoreunavailable bool) *UpdateByQuery // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -414,7 +687,7 @@ func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. +// Pipeline The ID of the pipeline to use to preprocess incoming documents. // If the index has a default ingest pipeline specified, then setting the value // to `_none` disables the default ingest pipeline for this request. // If a final pipeline is configured it will always run, regardless of the value @@ -426,8 +699,8 @@ func (r *UpdateByQuery) Pipeline(pipeline string) *UpdateByQuery { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. 
+// It is random by default. // API name: preference func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { r.values.Set("preference", preference) @@ -435,7 +708,7 @@ func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { return r } -// Q Query in the Lucene query string syntax. +// Q A query in the Lucene query string syntax. // API name: q func (r *UpdateByQuery) Q(q string) *UpdateByQuery { r.values.Set("q", q) @@ -444,7 +717,9 @@ func (r *UpdateByQuery) Q(q string) *UpdateByQuery { } // Refresh If `true`, Elasticsearch refreshes affected shards to make the operation -// visible to search. +// visible to search after the request completes. +// This is different than the update API's `refresh` parameter, which causes +// just the shard that received the request to be refreshed. // API name: refresh func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -453,6 +728,7 @@ func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { } // RequestCache If `true`, the request cache is used for this request. +// It defaults to the index-level setting. // API name: request_cache func (r *UpdateByQuery) RequestCache(requestcache bool) *UpdateByQuery { r.values.Set("request_cache", strconv.FormatBool(requestcache)) @@ -468,7 +744,7 @@ func (r *UpdateByQuery) RequestsPerSecond(requestspersecond string) *UpdateByQue return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { r.values.Set("routing", routing) @@ -476,7 +752,7 @@ func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { return r } -// Scroll Period to retain the search context for scrolling. +// Scroll The period to retain the search context for scrolling. 
// API name: scroll func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { r.values.Set("scroll", duration) @@ -484,7 +760,7 @@ func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { return r } -// ScrollSize Size of the scroll request that powers the operation. +// ScrollSize The size of the scroll request that powers the operation. // API name: scroll_size func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { r.values.Set("scroll_size", scrollsize) @@ -492,7 +768,8 @@ func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { return r } -// SearchTimeout Explicit timeout for each search request. +// SearchTimeout An explicit timeout for each search request. +// By default, there is no timeout. // API name: search_timeout func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { r.values.Set("search_timeout", duration) @@ -500,8 +777,8 @@ func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { return r } -// SearchType The type of the search operation. Available options: `query_then_fetch`, -// `dfs_query_then_fetch`. +// SearchType The type of the search operation. Available options include +// `query_then_fetch` and `dfs_query_then_fetch`. // API name: search_type func (r *UpdateByQuery) SearchType(searchtype searchtype.SearchType) *UpdateByQuery { r.values.Set("search_type", searchtype.String()) @@ -529,7 +806,7 @@ func (r *UpdateByQuery) Sort(sorts ...string) *UpdateByQuery { return r } -// Stats Specific `tag` of the request for logging and statistical purposes. +// Stats The specific `tag` of the request for logging and statistical purposes. // API name: stats func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { tmp := []string{} @@ -541,10 +818,11 @@ func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { return r } -// TerminateAfter Maximum number of documents to collect for each shard. 
+// TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. -// Use with caution. +// +// IMPORTANT: Use with caution. // Elasticsearch applies this parameter to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. // Avoid specifying this parameter for requests that target data streams with @@ -556,8 +834,11 @@ func (r *UpdateByQuery) TerminateAfter(terminateafter string) *UpdateByQuery { return r } -// Timeout Period each update request waits for the following operations: dynamic +// Timeout The period each update request waits for the following operations: dynamic // mapping updates, waiting for active shards. +// By default, it is one minute. +// This guarantees Elasticsearch waits for at least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *UpdateByQuery) Timeout(duration string) *UpdateByQuery { r.values.Set("timeout", duration) @@ -586,6 +867,9 @@ func (r *UpdateByQuery) VersionType(versiontype bool) *UpdateByQuery { // operation. // Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The `timeout` parameter controls how long each write request waits for +// unavailable shards to become available. +// Both work exactly the way they work in the bulk API. // API name: wait_for_active_shards func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateByQuery { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -594,6 +878,11 @@ func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateB } // WaitForCompletion If `true`, the request blocks until the operation is complete. 
+// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task ID that you can use to cancel or get the status +// of the task. +// Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. // API name: wait_for_completion func (r *UpdateByQuery) WaitForCompletion(waitforcompletion bool) *UpdateByQuery { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -645,47 +934,67 @@ func (r *UpdateByQuery) Pretty(pretty bool) *UpdateByQuery { return r } -// Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. +// The preferred behavior when update by query hits version conflicts: `abort` +// or `proceed`. // API name: conflicts func (r *UpdateByQuery) Conflicts(conflicts conflicts.Conflicts) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Conflicts = &conflicts - return r } -// MaxDocs The maximum number of documents to update. +// The maximum number of documents to update. // API name: max_docs func (r *UpdateByQuery) MaxDocs(maxdocs int64) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Query Specifies the documents to update using the Query DSL. +// The documents to update using the Query DSL. // API name: query -func (r *UpdateByQuery) Query(query *types.Query) *UpdateByQuery { +func (r *UpdateByQuery) Query(query types.QueryVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Script The script to run to update the document source or metadata when updating. +// The script to run to update the document source or metadata when updating. 
// API name: script -func (r *UpdateByQuery) Script(script *types.Script) *UpdateByQuery { +func (r *UpdateByQuery) Script(script types.ScriptVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } -// Slice Slice the request manually using the provided slice ID and total number of +// Slice the request manually using the provided slice ID and total number of // slices. // API name: slice -func (r *UpdateByQuery) Slice(slice *types.SlicedScroll) *UpdateByQuery { +func (r *UpdateByQuery) Slice(slice types.SlicedScrollVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } diff --git a/typedapi/core/updatebyqueryrethrottle/response.go b/typedapi/core/updatebyqueryrethrottle/response.go index 7b4c2d00db..9bca05361a 100644 --- a/typedapi/core/updatebyqueryrethrottle/response.go +++ b/typedapi/core/updatebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatebyqueryrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.UpdateByQueryRethrottleNode `json:"nodes"` } diff --git a/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go b/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go index ab445327fe..b4bc49ffc8 100644 --- a/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go +++ b/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Throttle an update by query operation. // @@ -90,7 +90,7 @@ func NewUpdateByQueryRethrottleFunc(tp elastictransport.Interface) NewUpdateByQu // rethrotting that slows down the query takes effect after completing the // current batch to prevent scroll timeouts. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle func New(tp elastictransport.Interface) *UpdateByQueryRethrottle { r := &UpdateByQueryRethrottle{ transport: tp, @@ -312,6 +312,7 @@ func (r *UpdateByQueryRethrottle) _taskid(taskid string) *UpdateByQueryRethrottl } // RequestsPerSecond The throttle for this request in sub-requests per second. +// To turn off throttling, set it to `-1`. // API name: requests_per_second func (r *UpdateByQueryRethrottle) RequestsPerSecond(requestspersecond string) *UpdateByQueryRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go b/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go index eb4e2b73ed..789de19a30 100644 --- a/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go +++ b/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a dangling index. -// // If Elasticsearch encounters index data that is absent from the current // cluster state, those indices are considered to be dangling. // For example, this can happen if you delete more than @@ -83,14 +82,13 @@ func NewDeleteDanglingIndexFunc(tp elastictransport.Interface) NewDeleteDangling } // Delete a dangling index. -// // If Elasticsearch encounters index data that is absent from the current // cluster state, those indices are considered to be dangling. 
// For example, this can happen if you delete more than // `cluster.indices.tombstones.size` indices while an Elasticsearch node is // offline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index func New(tp elastictransport.Interface) *DeleteDanglingIndex { r := &DeleteDanglingIndex{ transport: tp, diff --git a/typedapi/danglingindices/deletedanglingindex/response.go b/typedapi/danglingindices/deletedanglingindex/response.go index f448689ae0..ce28766926 100644 --- a/typedapi/danglingindices/deletedanglingindex/response.go +++ b/typedapi/danglingindices/deletedanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletedanglingindex // Response holds the response body struct for the package deletedanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/danglingindices/importdanglingindex/import_dangling_index.go b/typedapi/danglingindices/importdanglingindex/import_dangling_index.go index 88b8c34e41..6493f0181f 100644 --- a/typedapi/danglingindices/importdanglingindex/import_dangling_index.go +++ b/typedapi/danglingindices/importdanglingindex/import_dangling_index.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Import a dangling index. // @@ -90,7 +90,7 @@ func NewImportDanglingIndexFunc(tp elastictransport.Interface) NewImportDangling // `cluster.indices.tombstones.size` indices while an Elasticsearch node is // offline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index func New(tp elastictransport.Interface) *ImportDanglingIndex { r := &ImportDanglingIndex{ transport: tp, diff --git a/typedapi/danglingindices/importdanglingindex/response.go b/typedapi/danglingindices/importdanglingindex/response.go index c1a9f58990..9d2c0dda1f 100644 --- a/typedapi/danglingindices/importdanglingindex/response.go +++ b/typedapi/danglingindices/importdanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package importdanglingindex // Response holds the response body struct for the package importdanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go b/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go index 3402422599..02c271dc5c 100644 --- a/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go +++ b/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the dangling indices. // @@ -86,7 +86,7 @@ func NewListDanglingIndicesFunc(tp elastictransport.Interface) NewListDanglingIn // // Use this API to list dangling indices, which you can then import or delete. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices func New(tp elastictransport.Interface) *ListDanglingIndices { r := &ListDanglingIndices{ transport: tp, diff --git a/typedapi/danglingindices/listdanglingindices/response.go b/typedapi/danglingindices/listdanglingindices/response.go index 6e36ec6417..c70b51db71 100644 --- a/typedapi/danglingindices/listdanglingindices/response.go +++ b/typedapi/danglingindices/listdanglingindices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package listdanglingindices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package listdanglingindices // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 type Response struct { DanglingIndices []types.DanglingIndex `json:"dangling_indices"` } diff --git a/typedapi/enrich/deletepolicy/delete_policy.go b/typedapi/enrich/deletepolicy/delete_policy.go index 05caadf22b..af61a27972 100644 --- a/typedapi/enrich/deletepolicy/delete_policy.go +++ b/typedapi/enrich/deletepolicy/delete_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an enrich policy. // Deletes an existing enrich policy and its enrich index. @@ -80,7 +80,7 @@ func NewDeletePolicyFunc(tp elastictransport.Interface) NewDeletePolicy { // Delete an enrich policy. // Deletes an existing enrich policy and its enrich index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy func New(tp elastictransport.Interface) *DeletePolicy { r := &DeletePolicy{ transport: tp, @@ -301,6 +301,14 @@ func (r *DeletePolicy) _name(name string) *DeletePolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *DeletePolicy) MasterTimeout(duration string) *DeletePolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/enrich/deletepolicy/response.go b/typedapi/enrich/deletepolicy/response.go index fd35d037bc..a7d6a97ab2 100644 --- a/typedapi/enrich/deletepolicy/response.go +++ b/typedapi/enrich/deletepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletepolicy // Response holds the response body struct for the package deletepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/enrich/executepolicy/execute_policy.go b/typedapi/enrich/executepolicy/execute_policy.go index 3915b55bd3..23d7f83f30 100644 --- a/typedapi/enrich/executepolicy/execute_policy.go +++ b/typedapi/enrich/executepolicy/execute_policy.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates the enrich index for an existing enrich policy. +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. package executepolicy import ( @@ -76,9 +77,10 @@ func NewExecutePolicyFunc(tp elastictransport.Interface) NewExecutePolicy { } } -// Creates the enrich index for an existing enrich policy. +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy func New(tp elastictransport.Interface) *ExecutePolicy { r := &ExecutePolicy{ transport: tp, @@ -301,6 +303,14 @@ func (r *ExecutePolicy) _name(name string) *ExecutePolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ExecutePolicy) MasterTimeout(duration string) *ExecutePolicy { + r.values.Set("master_timeout", duration) + + return r +} + // WaitForCompletion If `true`, the request blocks other enrich policy execution requests until // complete. // API name: wait_for_completion diff --git a/typedapi/enrich/executepolicy/response.go b/typedapi/enrich/executepolicy/response.go index 8985130def..e255faed4c 100644 --- a/typedapi/enrich/executepolicy/response.go +++ b/typedapi/enrich/executepolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package executepolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package executepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 type Response struct { Status *types.ExecuteEnrichPolicyStatus `json:"status,omitempty"` TaskId types.TaskId `json:"task_id,omitempty"` diff --git a/typedapi/enrich/getpolicy/get_policy.go b/typedapi/enrich/getpolicy/get_policy.go index 95b50c4d67..15ce33fb78 100644 --- a/typedapi/enrich/getpolicy/get_policy.go +++ b/typedapi/enrich/getpolicy/get_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get an enrich policy. // Returns information about an enrich policy. @@ -78,7 +78,7 @@ func NewGetPolicyFunc(tp elastictransport.Interface) NewGetPolicy { // Get an enrich policy. // Returns information about an enrich policy. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy func New(tp elastictransport.Interface) *GetPolicy { r := &GetPolicy{ transport: tp, @@ -307,6 +307,14 @@ func (r *GetPolicy) Name(name string) *GetPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *GetPolicy) MasterTimeout(duration string) *GetPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/enrich/getpolicy/response.go b/typedapi/enrich/getpolicy/response.go index a7ec36323f..14143cc31e 100644 --- a/typedapi/enrich/getpolicy/response.go +++ b/typedapi/enrich/getpolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getpolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 type Response struct { Policies []types.Summary `json:"policies"` } diff --git a/typedapi/enrich/putpolicy/put_policy.go b/typedapi/enrich/putpolicy/put_policy.go index 702abf2d5f..a82c794ad1 100644 --- a/typedapi/enrich/putpolicy/put_policy.go +++ b/typedapi/enrich/putpolicy/put_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create an enrich policy. // Creates an enrich policy. @@ -85,7 +85,7 @@ func NewPutPolicyFunc(tp elastictransport.Interface) NewPutPolicy { // Create an enrich policy. // Creates an enrich policy. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy func New(tp elastictransport.Interface) *PutPolicy { r := &PutPolicy{ transport: tp, @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *PutPolicy { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,6 +315,14 @@ func (r *PutPolicy) _name(name string) *PutPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PutPolicy) MasterTimeout(duration string) *PutPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -361,30 +367,42 @@ func (r *PutPolicy) Pretty(pretty bool) *PutPolicy { return r } -// GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. +// Matches enrich data to incoming documents based on a `geo_shape` query. // API name: geo_match -func (r *PutPolicy) GeoMatch(geomatch *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) GeoMatch(geomatch types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.GeoMatch = geomatch + r.req.GeoMatch = geomatch.EnrichPolicyCaster() return r } -// Match Matches enrich data to incoming documents based on a `term` query. +// Matches enrich data to incoming documents based on a `term` query. 
// API name: match -func (r *PutPolicy) Match(match *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) Match(match types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Match = match + r.req.Match = match.EnrichPolicyCaster() return r } -// Range Matches a number, date, or IP address in incoming documents to a range in the +// Matches a number, date, or IP address in incoming documents to a range in the // enrich index based on a `term` query. // API name: range -func (r *PutPolicy) Range(range_ *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) Range(range_ types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Range = range_ + r.req.Range = range_.EnrichPolicyCaster() return r } diff --git a/typedapi/enrich/putpolicy/request.go b/typedapi/enrich/putpolicy/request.go index 41475ee08a..2a89a97ae6 100644 --- a/typedapi/enrich/putpolicy/request.go +++ b/typedapi/enrich/putpolicy/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putpolicy @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L24-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L25-L67 type Request struct { // GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. 
diff --git a/typedapi/enrich/putpolicy/response.go b/typedapi/enrich/putpolicy/response.go index 90fe00cace..46b3c01c68 100644 --- a/typedapi/enrich/putpolicy/response.go +++ b/typedapi/enrich/putpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putpolicy // Response holds the response body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/enrich/stats/response.go b/typedapi/enrich/stats/response.go index 128300a3cc..8e13aa2e15 100644 --- a/typedapi/enrich/stats/response.go +++ b/typedapi/enrich/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 type Response struct { // CacheStats Objects containing information about the enrich cache stats on each ingest diff --git a/typedapi/enrich/stats/stats.go b/typedapi/enrich/stats/stats.go index c90a8acb5e..f2e8e5ad6d 100644 --- a/typedapi/enrich/stats/stats.go +++ b/typedapi/enrich/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get enrich stats. // Returns enrich coordinator statistics and information about enrich policies @@ -74,7 +74,7 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { // Returns enrich coordinator statistics and information about enrich policies // that are currently executing. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, @@ -280,6 +280,14 @@ func (r *Stats) Header(key, value string) *Stats { return r } +// MasterTimeout Period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/eql/delete/delete.go b/typedapi/eql/delete/delete.go index c6983cadfb..1b880fddc8 100644 --- a/typedapi/eql/delete/delete.go +++ b/typedapi/eql/delete/delete.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes an async EQL search or a stored synchronous EQL search. +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. package delete @@ -77,10 +78,11 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes an async EQL search or a stored synchronous EQL search. +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, diff --git a/typedapi/eql/delete/response.go b/typedapi/eql/delete/response.go index 1011edb631..065582faf4 100644 --- a/typedapi/eql/delete/response.go +++ b/typedapi/eql/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/eql/get/get.go b/typedapi/eql/get/get.go index 0dd118cff0..69c8a53437 100644 --- a/typedapi/eql/get/get.go +++ b/typedapi/eql/get/get.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the current status and available results for an async EQL search or a +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a // stored synchronous EQL search. package get @@ -77,10 +78,11 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Returns the current status and available results for an async EQL search or a +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a // stored synchronous EQL search. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/eql/get/response.go b/typedapi/eql/get/response.go index f33aad01de..4dbda14d77 100644 --- a/typedapi/eql/get/response.go +++ b/typedapi/eql/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/get/EqlGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/get/EqlGetResponse.ts#L22-L24 type Response struct { // Hits Contains matching events and sequences. Also contains related metadata. @@ -37,6 +37,9 @@ type Response struct { IsPartial *bool `json:"is_partial,omitempty"` // IsRunning If true, the search request is still executing. IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` // TimedOut If true, the request timed out before completion. TimedOut *bool `json:"timed_out,omitempty"` // Took Milliseconds it took Elasticsearch to execute the request. 
diff --git a/typedapi/eql/getstatus/get_status.go b/typedapi/eql/getstatus/get_status.go index 4c6e3658cd..22d6b1cdf2 100644 --- a/typedapi/eql/getstatus/get_status.go +++ b/typedapi/eql/getstatus/get_status.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the current status for an async EQL search or a stored synchronous -// EQL search without returning results. +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. package getstatus import ( @@ -77,10 +78,11 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Returns the current status for an async EQL search or a stored synchronous -// EQL search without returning results. +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-status-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status func New(tp elastictransport.Interface) *GetStatus { r := &GetStatus{ transport: tp, diff --git a/typedapi/eql/getstatus/response.go b/typedapi/eql/getstatus/response.go index b14417edf0..18b075a7da 100644 --- a/typedapi/eql/getstatus/response.go +++ b/typedapi/eql/getstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getstatus // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 type Response struct { // CompletionStatus For a completed search shows the http status code of the completed search. diff --git a/typedapi/eql/search/request.go b/typedapi/eql/search/request.go index b2df8499c7..48dd935dfc 100644 --- a/typedapi/eql/search/request.go +++ b/typedapi/eql/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -34,9 +34,23 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/search/EqlSearchRequest.ts#L28-L125 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/search/EqlSearchRequest.ts#L28-L161 type Request struct { - CaseSensitive *bool `json:"case_sensitive,omitempty"` + + // AllowPartialSearchResults Allow query execution also in case of shard failures. + // If true, the query will keep running and will return results based on the + // available shards. 
+ // For sequences, the behavior can be further refined using + // allow_partial_sequence_results + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // AllowPartialSequenceResults This flag applies only to sequences and has effect only if + // allow_partial_search_results=true. + // If true, the sequence query will return results based on the available + // shards, ignoring the others. + // If false, the sequence query will return successfully, but will always have + // empty results. + AllowPartialSequenceResults *bool `json:"allow_partial_sequence_results,omitempty"` + CaseSensitive *bool `json:"case_sensitive,omitempty"` // EventCategoryField Field containing the event classification, such as process, file, or network. EventCategoryField *string `json:"event_category_field,omitempty"` // FetchSize Maximum number of events to search at a time for sequence queries. @@ -102,6 +116,34 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + + case "allow_partial_sequence_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSequenceResults", err) + } + s.AllowPartialSequenceResults = &value + case bool: + s.AllowPartialSequenceResults = &v + } + case "case_sensitive": var tmp any dec.Decode(&tmp) diff --git a/typedapi/eql/search/response.go b/typedapi/eql/search/response.go index 16f8f4d9bb..04341e856f 100644 --- a/typedapi/eql/search/response.go +++ b/typedapi/eql/search/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/search/EqlSearchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/search/EqlSearchResponse.ts#L22-L24 type Response struct { // Hits Contains matching events and sequences. Also contains related metadata. @@ -37,6 +37,9 @@ type Response struct { IsPartial *bool `json:"is_partial,omitempty"` // IsRunning If true, the search request is still executing. IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` // TimedOut If true, the request timed out before completion. TimedOut *bool `json:"timed_out,omitempty"` // Took Milliseconds it took Elasticsearch to execute the request. diff --git a/typedapi/eql/search/search.go b/typedapi/eql/search/search.go index a4b01facbb..099bf82491 100644 --- a/typedapi/eql/search/search.go +++ b/typedapi/eql/search/search.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns results matching a query expressed in Event Query Language (EQL) +// Get EQL search results. 
+// Returns search results for an Event Query Language (EQL) query. +// EQL assumes each document in a data stream or index corresponds to an event. package search import ( @@ -83,9 +85,11 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { } } -// Returns results matching a query expressed in Event Query Language (EQL) +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. +// EQL assumes each document in a data stream or index corresponds to an event. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search func New(tp elastictransport.Interface) *Search { r := &Search{ transport: tp, @@ -93,8 +97,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,77 +389,159 @@ func (r *Search) Pretty(pretty bool) *Search { return r } +// Allow query execution also in case of shard failures. +// If true, the query will keep running and will return results based on the +// available shards. +// For sequences, the behavior can be further refined using +// allow_partial_sequence_results +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// This flag applies only to sequences and has effect only if +// allow_partial_search_results=true. +// If true, the sequence query will return results based on the available +// shards, ignoring the others. +// If false, the sequence query will return successfully, but will always have +// empty results. 
+// API name: allow_partial_sequence_results +func (r *Search) AllowPartialSequenceResults(allowpartialsequenceresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSequenceResults = &allowpartialsequenceresults + + return r +} + // API name: case_sensitive func (r *Search) CaseSensitive(casesensitive bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CaseSensitive = &casesensitive return r } -// EventCategoryField Field containing the event classification, such as process, file, or network. +// Field containing the event classification, such as process, file, or network. // API name: event_category_field func (r *Search) EventCategoryField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.EventCategoryField = &field return r } -// FetchSize Maximum number of events to search at a time for sequence queries. +// Maximum number of events to search at a time for sequence queries. // API name: fetch_size func (r *Search) FetchSize(fetchsize uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.FetchSize = &fetchsize return r } -// Fields Array of wildcard (*) patterns. The response returns values for field names +// Array of wildcard (*) patterns. The response returns values for field names // matching these patterns in the fields property of each hit. 
// API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = make([]types.FieldAndFormat, len(fields)) + for i, v := range fields { + r.req.Fields[i] = *v.FieldAndFormatCaster() + } return r } -// Filter Query, written in Query DSL, used to filter the events on which the EQL query +// Query, written in Query DSL, used to filter the events on which the EQL query // runs. // API name: filter -func (r *Search) Filter(filters ...types.Query) *Search { - r.req.Filter = filters +func (r *Search) Filter(filters ...types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + r.req.Filter[i] = *v.QueryCaster() + } return r } // API name: keep_alive -func (r *Search) KeepAlive(duration types.Duration) *Search { - r.req.KeepAlive = duration +func (r *Search) KeepAlive(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() return r } // API name: keep_on_completion func (r *Search) KeepOnCompletion(keeponcompletion bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.KeepOnCompletion = &keeponcompletion return r } -// MaxSamplesPerKey By default, the response of a sample query contains up to `10` samples, with +// By default, the response of a sample query contains up to `10` samples, with // one sample per unique set of join keys. Use the `size` // parameter to get a smaller or larger set of samples. 
To retrieve more than // one sample per set of join keys, use the // `max_samples_per_key` parameter. Pipes are not supported for sample queries. // API name: max_samples_per_key func (r *Search) MaxSamplesPerKey(maxsamplesperkey int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxSamplesPerKey = &maxsamplesperkey return r } -// Query EQL query you wish to run. +// EQL query you wish to run. // API name: query func (r *Search) Query(query string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query @@ -466,47 +550,74 @@ func (r *Search) Query(query string) *Search { // API name: result_position func (r *Search) ResultPosition(resultposition resultposition.ResultPosition) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultPosition = &resultposition - return r } // API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// Size For basic queries, the maximum number of matching events to return. Defaults +// For basic queries, the maximum number of matching events to return. 
Defaults // to 10 // API name: size func (r *Search) Size(size uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size return r } -// TiebreakerField Field used to sort hits with the same timestamp in ascending order +// Field used to sort hits with the same timestamp in ascending order // API name: tiebreaker_field func (r *Search) TiebreakerField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TiebreakerField = &field return r } -// TimestampField Field containing event timestamp. Default "@timestamp" +// Field containing event timestamp. Default "@timestamp" // API name: timestamp_field func (r *Search) TimestampField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimestampField = &field return r } // API name: wait_for_completion_timeout -func (r *Search) WaitForCompletionTimeout(duration types.Duration) *Search { - r.req.WaitForCompletionTimeout = duration +func (r *Search) WaitForCompletionTimeout(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/esdsl/access.go b/typedapi/esdsl/access.go new file mode 100644 index 0000000000..2c0a8229a8 --- /dev/null +++ b/typedapi/esdsl/access.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _access struct { + v *types.Access +} + +func NewAccess() *_access { + + return &_access{v: types.NewAccess()} + +} + +// A list of indices permission entries for cross-cluster replication. +func (s *_access) Replication(replications ...types.ReplicationAccessVariant) *_access { + + for _, v := range replications { + + s.v.Replication = append(s.v.Replication, *v.ReplicationAccessCaster()) + + } + return s +} + +// A list of indices permission entries for cross-cluster search. +func (s *_access) Search(searches ...types.SearchAccessVariant) *_access { + + for _, v := range searches { + + s.v.Search = append(s.v.Search, *v.SearchAccessCaster()) + + } + return s +} + +func (s *_access) AccessCaster() *types.Access { + return s.v +} diff --git a/typedapi/esdsl/acknowledgestate.go b/typedapi/esdsl/acknowledgestate.go new file mode 100644 index 0000000000..26b38444b4 --- /dev/null +++ b/typedapi/esdsl/acknowledgestate.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions" +) + +type _acknowledgeState struct { + v *types.AcknowledgeState +} + +func NewAcknowledgeState(state acknowledgementoptions.AcknowledgementOptions) *_acknowledgeState { + + tmp := &_acknowledgeState{v: types.NewAcknowledgeState()} + + tmp.State(state) + + return tmp + +} + +func (s *_acknowledgeState) State(state acknowledgementoptions.AcknowledgementOptions) *_acknowledgeState { + + s.v.State = state + return s +} + +func (s *_acknowledgeState) Timestamp(datetime types.DateTimeVariant) *_acknowledgeState { + + s.v.Timestamp = *datetime.DateTimeCaster() + + return s +} + +func (s *_acknowledgeState) AcknowledgeStateCaster() *types.AcknowledgeState { + return s.v +} diff --git a/typedapi/esdsl/actionstatus.go b/typedapi/esdsl/actionstatus.go new file mode 100644 index 0000000000..ca91b52c5d --- /dev/null +++ b/typedapi/esdsl/actionstatus.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _actionStatus struct { + v *types.ActionStatus +} + +func NewActionStatus(ack types.AcknowledgeStateVariant) *_actionStatus { + + tmp := &_actionStatus{v: types.NewActionStatus()} + + tmp.Ack(ack) + + return tmp + +} + +func (s *_actionStatus) Ack(ack types.AcknowledgeStateVariant) *_actionStatus { + + s.v.Ack = *ack.AcknowledgeStateCaster() + + return s +} + +func (s *_actionStatus) LastExecution(lastexecution types.ExecutionStateVariant) *_actionStatus { + + s.v.LastExecution = lastexecution.ExecutionStateCaster() + + return s +} + +func (s *_actionStatus) LastSuccessfulExecution(lastsuccessfulexecution types.ExecutionStateVariant) *_actionStatus { + + s.v.LastSuccessfulExecution = lastsuccessfulexecution.ExecutionStateCaster() + + return s +} + +func (s *_actionStatus) LastThrottle(lastthrottle types.ThrottleStateVariant) *_actionStatus { + + s.v.LastThrottle = lastthrottle.ThrottleStateCaster() + + return s +} + +func (s *_actionStatus) ActionStatusCaster() *types.ActionStatus { + return s.v +} 
diff --git a/typedapi/esdsl/activationstate.go b/typedapi/esdsl/activationstate.go new file mode 100644 index 0000000000..b6c9538983 --- /dev/null +++ b/typedapi/esdsl/activationstate.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _activationState struct { + v *types.ActivationState +} + +func NewActivationState(active bool) *_activationState { + + tmp := &_activationState{v: types.NewActivationState()} + + tmp.Active(active) + + return tmp + +} + +func (s *_activationState) Active(active bool) *_activationState { + + s.v.Active = active + + return s +} + +func (s *_activationState) Timestamp(datetime types.DateTimeVariant) *_activationState { + + s.v.Timestamp = *datetime.DateTimeCaster() + + return s +} + +func (s *_activationState) ActivationStateCaster() *types.ActivationState { + return s.v +} diff --git a/typedapi/esdsl/adaptiveallocationssettings.go b/typedapi/esdsl/adaptiveallocationssettings.go new file mode 100644 index 0000000000..b128183101 --- /dev/null +++ b/typedapi/esdsl/adaptiveallocationssettings.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _adaptiveAllocationsSettings struct { + v *types.AdaptiveAllocationsSettings +} + +func NewAdaptiveAllocationsSettings(enabled bool) *_adaptiveAllocationsSettings { + + tmp := &_adaptiveAllocationsSettings{v: types.NewAdaptiveAllocationsSettings()} + + tmp.Enabled(enabled) + + return tmp + +} + +// If true, adaptive_allocations is enabled +func (s *_adaptiveAllocationsSettings) Enabled(enabled bool) *_adaptiveAllocationsSettings { + + s.v.Enabled = enabled + + return s +} + +// Specifies the maximum number of allocations to scale to. +// If set, it must be greater than or equal to min_number_of_allocations. +func (s *_adaptiveAllocationsSettings) MaxNumberOfAllocations(maxnumberofallocations int) *_adaptiveAllocationsSettings { + + s.v.MaxNumberOfAllocations = &maxnumberofallocations + + return s +} + +// Specifies the minimum number of allocations to scale to. +// If set, it must be greater than or equal to 0. +// If not defined, the deployment scales to 0. +func (s *_adaptiveAllocationsSettings) MinNumberOfAllocations(minnumberofallocations int) *_adaptiveAllocationsSettings { + + s.v.MinNumberOfAllocations = &minnumberofallocations + + return s +} + +func (s *_adaptiveAllocationsSettings) AdaptiveAllocationsSettingsCaster() *types.AdaptiveAllocationsSettings { + return s.v +} diff --git a/typedapi/esdsl/addaction.go b/typedapi/esdsl/addaction.go new file mode 100644 index 0000000000..f26a20586f --- /dev/null +++ b/typedapi/esdsl/addaction.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _addAction struct { + v *types.AddAction +} + +// Adds a data stream or index to an alias. +// If the alias doesn’t exist, the `add` action creates it. +func NewAddAction() *_addAction { + + return &_addAction{v: types.NewAddAction()} + +} + +// Alias for the action. +// Index alias names support date math. +func (s *_addAction) Alias(indexalias string) *_addAction { + + s.v.Alias = &indexalias + + return s +} + +// Aliases for the action. +// Index alias names support date math. +func (s *_addAction) Aliases(aliases ...string) *_addAction { + + s.v.Aliases = make([]string, len(aliases)) + s.v.Aliases = aliases + + return s +} + +// Query used to limit documents the alias can access. +func (s *_addAction) Filter(filter types.QueryVariant) *_addAction { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// Data stream or index for the action. +// Supports wildcards (`*`). +func (s *_addAction) Index(indexname string) *_addAction { + + s.v.Index = &indexname + + return s +} + +// Value used to route indexing operations to a specific shard. +// If specified, this overwrites the `routing` value for indexing operations. 
+// Data stream aliases don’t support this parameter. +func (s *_addAction) IndexRouting(routing string) *_addAction { + + s.v.IndexRouting = &routing + + return s +} + +// Data streams or indices for the action. +// Supports wildcards (`*`). +func (s *_addAction) Indices(indices ...string) *_addAction { + + s.v.Indices = indices + + return s +} + +// If `true`, the alias is hidden. +func (s *_addAction) IsHidden(ishidden bool) *_addAction { + + s.v.IsHidden = &ishidden + + return s +} + +// If `true`, sets the write index or data stream for the alias. +func (s *_addAction) IsWriteIndex(iswriteindex bool) *_addAction { + + s.v.IsWriteIndex = &iswriteindex + + return s +} + +// If `true`, the alias must exist to perform the action. +func (s *_addAction) MustExist(mustexist bool) *_addAction { + + s.v.MustExist = &mustexist + + return s +} + +// Value used to route indexing and search operations to a specific shard. +// Data stream aliases don’t support this parameter. +func (s *_addAction) Routing(routing string) *_addAction { + + s.v.Routing = &routing + + return s +} + +// Value used to route search operations to a specific shard. +// If specified, this overwrites the `routing` value for search operations. +// Data stream aliases don’t support this parameter. +func (s *_addAction) SearchRouting(routing string) *_addAction { + + s.v.SearchRouting = &routing + + return s +} + +func (s *_addAction) IndicesActionCaster() *types.IndicesAction { + container := types.NewIndicesAction() + + container.Add = s.v + + return container +} + +func (s *_addAction) AddActionCaster() *types.AddAction { + return s.v +} diff --git a/typedapi/esdsl/adjacencymatrixaggregation.go b/typedapi/esdsl/adjacencymatrixaggregation.go new file mode 100644 index 0000000000..eed3604b2c --- /dev/null +++ b/typedapi/esdsl/adjacencymatrixaggregation.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _adjacencyMatrixAggregation struct { + v *types.AdjacencyMatrixAggregation +} + +// A bucket aggregation returning a form of adjacency matrix. +// The request provides a collection of named filter expressions, similar to the +// `filters` aggregation. +// Each bucket in the response represents a non-empty cell in the matrix of +// intersecting filters. +func NewAdjacencyMatrixAggregation() *_adjacencyMatrixAggregation { + + return &_adjacencyMatrixAggregation{v: types.NewAdjacencyMatrixAggregation()} + +} + +// Filters used to create buckets. +// At least one filter is required. 
+func (s *_adjacencyMatrixAggregation) Filters(filters map[string]types.Query) *_adjacencyMatrixAggregation { + + s.v.Filters = filters + return s +} + +func (s *_adjacencyMatrixAggregation) AddFilter(key string, value types.QueryVariant) *_adjacencyMatrixAggregation { + + var tmp map[string]types.Query + if s.v.Filters == nil { + s.v.Filters = make(map[string]types.Query) + } else { + tmp = s.v.Filters + } + + tmp[key] = *value.QueryCaster() + + s.v.Filters = tmp + return s +} + +// Separator used to concatenate filter names. Defaults to &. +func (s *_adjacencyMatrixAggregation) Separator(separator string) *_adjacencyMatrixAggregation { + + s.v.Separator = &separator + + return s +} + +func (s *_adjacencyMatrixAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.AdjacencyMatrix = s.v + + return container +} + +func (s *_adjacencyMatrixAggregation) AdjacencyMatrixAggregationCaster() *types.AdjacencyMatrixAggregation { + return s.v +} diff --git a/typedapi/esdsl/aggregatemetricdoubleproperty.go b/typedapi/esdsl/aggregatemetricdoubleproperty.go new file mode 100644 index 0000000000..b145a0de6e --- /dev/null +++ b/typedapi/esdsl/aggregatemetricdoubleproperty.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _aggregateMetricDoubleProperty struct { + v *types.AggregateMetricDoubleProperty +} + +func NewAggregateMetricDoubleProperty(defaultmetric string) *_aggregateMetricDoubleProperty { + + tmp := &_aggregateMetricDoubleProperty{v: types.NewAggregateMetricDoubleProperty()} + + tmp.DefaultMetric(defaultmetric) + + return tmp + +} + +func (s *_aggregateMetricDoubleProperty) DefaultMetric(defaultmetric string) *_aggregateMetricDoubleProperty { + + s.v.DefaultMetric = defaultmetric + + return s +} + +func (s *_aggregateMetricDoubleProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_aggregateMetricDoubleProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_aggregateMetricDoubleProperty) Fields(fields map[string]types.Property) *_aggregateMetricDoubleProperty { + + s.v.Fields = fields + return s +} + +func (s *_aggregateMetricDoubleProperty) AddField(key string, value types.PropertyVariant) *_aggregateMetricDoubleProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_aggregateMetricDoubleProperty) IgnoreAbove(ignoreabove int) *_aggregateMetricDoubleProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_aggregateMetricDoubleProperty) Meta(meta map[string]string) *_aggregateMetricDoubleProperty { + + s.v.Meta = meta + return s +} + +func (s *_aggregateMetricDoubleProperty) AddMeta(key string, value string) *_aggregateMetricDoubleProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_aggregateMetricDoubleProperty) Metrics(metrics ...string) *_aggregateMetricDoubleProperty { + + for _, v := range metrics { + + s.v.Metrics = append(s.v.Metrics, v) + + } + return s +} + +func (s *_aggregateMetricDoubleProperty) Properties(properties map[string]types.Property) *_aggregateMetricDoubleProperty { + + s.v.Properties = properties + return s +} + +func (s *_aggregateMetricDoubleProperty) AddProperty(key string, value types.PropertyVariant) *_aggregateMetricDoubleProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_aggregateMetricDoubleProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_aggregateMetricDoubleProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_aggregateMetricDoubleProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_aggregateMetricDoubleProperty { + + s.v.TimeSeriesMetric = ×eriesmetric + return s +} + +func (s *_aggregateMetricDoubleProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_aggregateMetricDoubleProperty) AggregateMetricDoublePropertyCaster() *types.AggregateMetricDoubleProperty { + return s.v +} diff --git a/typedapi/esdsl/aggregateorder.go 
b/typedapi/esdsl/aggregateorder.go new file mode 100644 index 0000000000..55896a97cf --- /dev/null +++ b/typedapi/esdsl/aggregateorder.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +// This provides all the types that are part of the union.
+type _aggregateOrder struct { + v types.AggregateOrder +} + +func NewAggregateOrder() *_aggregateOrder { + return &_aggregateOrder{v: nil} +} + +func (u *_aggregateOrder) Map(value map[string]sortorder.SortOrder) *_aggregateOrder { // union map + + u.v = make(map[string]sortorder.SortOrder) + for k, v := range value { + u.v.(map[string]sortorder.SortOrder)[k] = v + } + + return u +} + +func (u *_aggregateOrder) SortOrders(sortorders ...map[string]sortorder.SortOrder) *_aggregateOrder { + + u.v = make([]map[string]sortorder.SortOrder, len(sortorders)) + u.v = sortorders + + return u +} + +func (u *_aggregateOrder) AggregateOrderCaster() *types.AggregateOrder { + return &u.v +} diff --git a/typedapi/esdsl/aggregateoutput.go b/typedapi/esdsl/aggregateoutput.go new file mode 100644 index 0000000000..9c37966d21 --- /dev/null +++ b/typedapi/esdsl/aggregateoutput.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _aggregateOutput struct { + v *types.AggregateOutput +} + +func NewAggregateOutput() *_aggregateOutput { + + return &_aggregateOutput{v: types.NewAggregateOutput()} + +} + +func (s *_aggregateOutput) Exponent(exponent types.WeightsVariant) *_aggregateOutput { + + s.v.Exponent = exponent.WeightsCaster() + + return s +} + +func (s *_aggregateOutput) LogisticRegression(logisticregression types.WeightsVariant) *_aggregateOutput { + + s.v.LogisticRegression = logisticregression.WeightsCaster() + + return s +} + +func (s *_aggregateOutput) WeightedMode(weightedmode types.WeightsVariant) *_aggregateOutput { + + s.v.WeightedMode = weightedmode.WeightsCaster() + + return s +} + +func (s *_aggregateOutput) WeightedSum(weightedsum types.WeightsVariant) *_aggregateOutput { + + s.v.WeightedSum = weightedsum.WeightsCaster() + + return s +} + +func (s *_aggregateOutput) AggregateOutputCaster() *types.AggregateOutput { + return s.v +} diff --git a/typedapi/types/inferenceresult.go b/typedapi/esdsl/aggregationrange.go similarity index 50% rename from typedapi/types/inferenceresult.go rename to typedapi/esdsl/aggregationrange.go index 830d9038b7..34468fd507 100644 --- a/typedapi/types/inferenceresult.go +++ b/typedapi/esdsl/aggregationrange.go @@ -16,24 +16,46 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package types +package esdsl -// InferenceResult type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L79-L89 -type InferenceResult struct { - Completion []CompletionResult `json:"completion,omitempty"` - Rerank []RankedDocument `json:"rerank,omitempty"` - SparseEmbedding []SparseEmbeddingResult `json:"sparse_embedding,omitempty"` - TextEmbedding []TextEmbeddingResult `json:"text_embedding,omitempty"` - TextEmbeddingBytes []TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _aggregationRange struct { + v *types.AggregationRange } -// NewInferenceResult returns a InferenceResult. -func NewInferenceResult() *InferenceResult { - r := &InferenceResult{} +func NewAggregationRange() *_aggregationRange { + + return &_aggregationRange{v: types.NewAggregationRange()} + +} + +// Start of the range (inclusive). +func (s *_aggregationRange) From(from types.Float64) *_aggregationRange { + + s.v.From = &from + + return s +} + +// Custom key to return the range with. +func (s *_aggregationRange) Key(key string) *_aggregationRange { + + s.v.Key = &key + + return s +} + +// End of the range (exclusive). +func (s *_aggregationRange) To(to types.Float64) *_aggregationRange { + + s.v.To = &to + + return s +} - return r +func (s *_aggregationRange) AggregationRangeCaster() *types.AggregationRange { + return s.v } diff --git a/typedapi/esdsl/aggregations.go b/typedapi/esdsl/aggregations.go new file mode 100644 index 0000000000..df76dc809c --- /dev/null +++ b/typedapi/esdsl/aggregations.go @@ -0,0 +1,806 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _aggregations struct { + v *types.Aggregations +} + +func NewAggregations() *_aggregations { + return &_aggregations{v: types.NewAggregations()} +} + +// AdditionalAggregationsProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_aggregations) AdditionalAggregationsProperty(key string, value json.RawMessage) *_aggregations { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalAggregationsProperty = tmp + return s +} + +// A bucket aggregation returning a form of adjacency matrix. +// The request provides a collection of named filter expressions, similar to the +// `filters` aggregation. +// Each bucket in the response represents a non-empty cell in the matrix of +// intersecting filters. +func (s *_aggregations) AdjacencyMatrix(adjacencymatrix types.AdjacencyMatrixAggregationVariant) *_aggregations { + + s.v.AdjacencyMatrix = adjacencymatrix.AdjacencyMatrixAggregationCaster() + + return s +} + +// Sub-aggregations for this aggregation. +// Only applies to bucket aggregations.
+func (s *_aggregations) Aggregations(aggregations map[string]types.Aggregations) *_aggregations { + + s.v.Aggregations = aggregations + return s +} + +func (s *_aggregations) AddAggregation(key string, value types.AggregationsVariant) *_aggregations { + + var tmp map[string]types.Aggregations + if s.v.Aggregations == nil { + s.v.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = s.v.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + s.v.Aggregations = tmp + return s +} + +// A multi-bucket aggregation similar to the date histogram, except instead of +// providing an interval to use as the width of each bucket, a target number of +// buckets is provided. +func (s *_aggregations) AutoDateHistogram(autodatehistogram types.AutoDateHistogramAggregationVariant) *_aggregations { + + s.v.AutoDateHistogram = autodatehistogram.AutoDateHistogramAggregationCaster() + + return s +} + +// A single-value metrics aggregation that computes the average of numeric +// values that are extracted from the aggregated documents. +func (s *_aggregations) Avg(avg types.AverageAggregationVariant) *_aggregations { + + s.v.Avg = avg.AverageAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which calculates the mean value of a specified +// metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must be a +// multi-bucket aggregation. +func (s *_aggregations) AvgBucket(avgbucket types.AverageBucketAggregationVariant) *_aggregations { + + s.v.AvgBucket = avgbucket.AverageBucketAggregationCaster() + + return s +} + +// A metrics aggregation that computes a box plot of numeric values extracted +// from the aggregated documents. 
+func (s *_aggregations) Boxplot(boxplot types.BoxplotAggregationVariant) *_aggregations { + + s.v.Boxplot = boxplot.BoxplotAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which runs a correlation function on the +// configured sibling multi-bucket aggregation. +func (s *_aggregations) BucketCorrelation(bucketcorrelation types.BucketCorrelationAggregationVariant) *_aggregations { + + s.v.BucketCorrelation = bucketcorrelation.BucketCorrelationAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov +// test ("K-S test") against a provided distribution and the distribution +// implied by the documents counts in the configured sibling aggregation. +func (s *_aggregations) BucketCountKsTest(bucketcountkstest types.BucketKsAggregationVariant) *_aggregations { + + s.v.BucketCountKsTest = bucketcountkstest.BucketKsAggregationCaster() + + return s +} + +// A parent pipeline aggregation which runs a script which can perform per +// bucket computations on metrics in the parent multi-bucket aggregation. +func (s *_aggregations) BucketScript(bucketscript types.BucketScriptAggregationVariant) *_aggregations { + + s.v.BucketScript = bucketscript.BucketScriptAggregationCaster() + + return s +} + +// A parent pipeline aggregation which runs a script to determine whether the +// current bucket will be retained in the parent multi-bucket aggregation. +func (s *_aggregations) BucketSelector(bucketselector types.BucketSelectorAggregationVariant) *_aggregations { + + s.v.BucketSelector = bucketselector.BucketSelectorAggregationCaster() + + return s +} + +// A parent pipeline aggregation which sorts the buckets of its parent +// multi-bucket aggregation. 
+func (s *_aggregations) BucketSort(bucketsort types.BucketSortAggregationVariant) *_aggregations { + + s.v.BucketSort = bucketsort.BucketSortAggregationCaster() + + return s +} + +// A single-value metrics aggregation that calculates an approximate count of +// distinct values. +func (s *_aggregations) Cardinality(cardinality types.CardinalityAggregationVariant) *_aggregations { + + s.v.Cardinality = cardinality.CardinalityAggregationCaster() + + return s +} + +// A multi-bucket aggregation that groups semi-structured text into buckets. +func (s *_aggregations) CategorizeText(categorizetext types.CategorizeTextAggregationVariant) *_aggregations { + + s.v.CategorizeText = categorizetext.CategorizeTextAggregationCaster() + + return s +} + +// A single bucket aggregation that selects child documents that have the +// specified type, as defined in a `join` field. +func (s *_aggregations) Children(children types.ChildrenAggregationVariant) *_aggregations { + + s.v.Children = children.ChildrenAggregationCaster() + + return s +} + +// A multi-bucket aggregation that creates composite buckets from different +// sources. +// Unlike the other multi-bucket aggregations, you can use the `composite` +// aggregation to paginate *all* buckets from a multi-level aggregation +// efficiently. +func (s *_aggregations) Composite(composite types.CompositeAggregationVariant) *_aggregations { + + s.v.Composite = composite.CompositeAggregationCaster() + + return s +} + +// A parent pipeline aggregation which calculates the cumulative cardinality in +// a parent `histogram` or `date_histogram` aggregation. 
+func (s *_aggregations) CumulativeCardinality(cumulativecardinality types.CumulativeCardinalityAggregationVariant) *_aggregations { + + s.v.CumulativeCardinality = cumulativecardinality.CumulativeCardinalityAggregationCaster() + + return s +} + +// A parent pipeline aggregation which calculates the cumulative sum of a +// specified metric in a parent `histogram` or `date_histogram` aggregation. +func (s *_aggregations) CumulativeSum(cumulativesum types.CumulativeSumAggregationVariant) *_aggregations { + + s.v.CumulativeSum = cumulativesum.CumulativeSumAggregationCaster() + + return s +} + +// A multi-bucket values source based aggregation that can be applied on date +// values or date range values extracted from the documents. +// It dynamically builds fixed size (interval) buckets over the values. +func (s *_aggregations) DateHistogram(datehistogram types.DateHistogramAggregationVariant) *_aggregations { + + s.v.DateHistogram = datehistogram.DateHistogramAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of date ranges - each representing a bucket. +func (s *_aggregations) DateRange(daterange types.DateRangeAggregationVariant) *_aggregations { + + s.v.DateRange = daterange.DateRangeAggregationCaster() + + return s +} + +// A parent pipeline aggregation which calculates the derivative of a specified +// metric in a parent `histogram` or `date_histogram` aggregation. +func (s *_aggregations) Derivative(derivative types.DerivativeAggregationVariant) *_aggregations { + + s.v.Derivative = derivative.DerivativeAggregationCaster() + + return s +} + +// A filtering aggregation used to limit any sub aggregations' processing to a +// sample of the top-scoring documents. +// Similar to the `sampler` aggregation, but adds the ability to limit the +// number of matches that share a common value. 
+func (s *_aggregations) DiversifiedSampler(diversifiedsampler types.DiversifiedSamplerAggregationVariant) *_aggregations { + + s.v.DiversifiedSampler = diversifiedsampler.DiversifiedSamplerAggregationCaster() + + return s +} + +// A multi-value metrics aggregation that computes stats over numeric values +// extracted from the aggregated documents. +func (s *_aggregations) ExtendedStats(extendedstats types.ExtendedStatsAggregationVariant) *_aggregations { + + s.v.ExtendedStats = extendedstats.ExtendedStatsAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which calculates a variety of stats across all +// bucket of a specified metric in a sibling aggregation. +func (s *_aggregations) ExtendedStatsBucket(extendedstatsbucket types.ExtendedStatsBucketAggregationVariant) *_aggregations { + + s.v.ExtendedStatsBucket = extendedstatsbucket.ExtendedStatsBucketAggregationCaster() + + return s +} + +// A single bucket aggregation that narrows the set of documents to those that +// match a query. +func (s *_aggregations) Filter(filter types.QueryVariant) *_aggregations { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// A multi-bucket aggregation where each bucket contains the documents that +// match a query. +func (s *_aggregations) Filters(filters types.FiltersAggregationVariant) *_aggregations { + + s.v.Filters = filters.FiltersAggregationCaster() + + return s +} + +// A bucket aggregation which finds frequent item sets, a form of association +// rules mining that identifies items that often occur together. +func (s *_aggregations) FrequentItemSets(frequentitemsets types.FrequentItemSetsAggregationVariant) *_aggregations { + + s.v.FrequentItemSets = frequentitemsets.FrequentItemSetsAggregationCaster() + + return s +} + +// A metric aggregation that computes the geographic bounding box containing all +// values for a Geopoint or Geoshape field. 
+func (s *_aggregations) GeoBounds(geobounds types.GeoBoundsAggregationVariant) *_aggregations { + + s.v.GeoBounds = geobounds.GeoBoundsAggregationCaster() + + return s +} + +// A metric aggregation that computes the weighted centroid from all coordinate +// values for geo fields. +func (s *_aggregations) GeoCentroid(geocentroid types.GeoCentroidAggregationVariant) *_aggregations { + + s.v.GeoCentroid = geocentroid.GeoCentroidAggregationCaster() + + return s +} + +// A multi-bucket aggregation that works on `geo_point` fields. +// Evaluates the distance of each document value from an origin point and +// determines the buckets it belongs to, based on ranges defined in the request. +func (s *_aggregations) GeoDistance(geodistance types.GeoDistanceAggregationVariant) *_aggregations { + + s.v.GeoDistance = geodistance.GeoDistanceAggregationCaster() + + return s +} + +// Aggregates all `geo_point` values within a bucket into a `LineString` ordered +// by the chosen sort field. +func (s *_aggregations) GeoLine(geoline types.GeoLineAggregationVariant) *_aggregations { + + s.v.GeoLine = geoline.GeoLineAggregationCaster() + + return s +} + +// A multi-bucket aggregation that groups `geo_point` and `geo_shape` values +// into buckets that represent a grid. +// Each cell is labeled using a geohash which is of user-definable precision. +func (s *_aggregations) GeohashGrid(geohashgrid types.GeoHashGridAggregationVariant) *_aggregations { + + s.v.GeohashGrid = geohashgrid.GeoHashGridAggregationCaster() + + return s +} + +// A multi-bucket aggregation that groups `geo_point` and `geo_shape` values +// into buckets that represent a grid. +// Each cell corresponds to a H3 cell index and is labeled using the H3Index +// representation. 
+func (s *_aggregations) GeohexGrid(geohexgrid types.GeohexGridAggregationVariant) *_aggregations { + + s.v.GeohexGrid = geohexgrid.GeohexGridAggregationCaster() + + return s +} + +// A multi-bucket aggregation that groups `geo_point` and `geo_shape` values +// into buckets that represent a grid. +// Each cell corresponds to a map tile as used by many online map sites. +func (s *_aggregations) GeotileGrid(geotilegrid types.GeoTileGridAggregationVariant) *_aggregations { + + s.v.GeotileGrid = geotilegrid.GeoTileGridAggregationCaster() + + return s +} + +// Defines a single bucket of all the documents within the search execution +// context. +// This context is defined by the indices and the document types you’re +// searching on, but is not influenced by the search query itself. +func (s *_aggregations) Global(global types.GlobalAggregationVariant) *_aggregations { + + s.v.Global = global.GlobalAggregationCaster() + + return s +} + +// A multi-bucket values source based aggregation that can be applied on numeric +// values or numeric range values extracted from the documents. +// It dynamically builds fixed size (interval) buckets over the values. +func (s *_aggregations) Histogram(histogram types.HistogramAggregationVariant) *_aggregations { + + s.v.Histogram = histogram.HistogramAggregationCaster() + + return s +} + +// A parent pipeline aggregation which loads a pre-trained model and performs +// inference on the collated result fields from the parent bucket aggregation. +func (s *_aggregations) Inference(inference types.InferenceAggregationVariant) *_aggregations { + + s.v.Inference = inference.InferenceAggregationCaster() + + return s +} + +// A bucket aggregation that groups documents based on the network or +// sub-network of an IP address. 
+func (s *_aggregations) IpPrefix(ipprefix types.IpPrefixAggregationVariant) *_aggregations { + + s.v.IpPrefix = ipprefix.IpPrefixAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of IP ranges - each representing a bucket. +func (s *_aggregations) IpRange(iprange types.IpRangeAggregationVariant) *_aggregations { + + s.v.IpRange = iprange.IpRangeAggregationCaster() + + return s +} + +func (s *_aggregations) Line(line types.GeoLineAggregationVariant) *_aggregations { + + s.v.Line = line.GeoLineAggregationCaster() + + return s +} + +// A numeric aggregation that computes the following statistics over a set of +// document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, +// `covariance`, and `covariance`. +func (s *_aggregations) MatrixStats(matrixstats types.MatrixStatsAggregationVariant) *_aggregations { + + s.v.MatrixStats = matrixstats.MatrixStatsAggregationCaster() + + return s +} + +// A single-value metrics aggregation that returns the maximum value among the +// numeric values extracted from the aggregated documents. +func (s *_aggregations) Max(max types.MaxAggregationVariant) *_aggregations { + + s.v.Max = max.MaxAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which identifies the bucket(s) with the +// maximum value of a specified metric in a sibling aggregation and outputs both +// the value and the key(s) of the bucket(s). +func (s *_aggregations) MaxBucket(maxbucket types.MaxBucketAggregationVariant) *_aggregations { + + s.v.MaxBucket = maxbucket.MaxBucketAggregationCaster() + + return s +} + +// A single-value aggregation that approximates the median absolute deviation of +// its search results. 
+func (s *_aggregations) MedianAbsoluteDeviation(medianabsolutedeviation types.MedianAbsoluteDeviationAggregationVariant) *_aggregations { + + s.v.MedianAbsoluteDeviation = medianabsolutedeviation.MedianAbsoluteDeviationAggregationCaster() + + return s +} + +func (s *_aggregations) Meta(metadata types.MetadataVariant) *_aggregations { + + s.v.Meta = *metadata.MetadataCaster() + + return s +} + +// A single-value metrics aggregation that returns the minimum value among +// numeric values extracted from the aggregated documents. +func (s *_aggregations) Min(min types.MinAggregationVariant) *_aggregations { + + s.v.Min = min.MinAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which identifies the bucket(s) with the +// minimum value of a specified metric in a sibling aggregation and outputs both +// the value and the key(s) of the bucket(s). +func (s *_aggregations) MinBucket(minbucket types.MinBucketAggregationVariant) *_aggregations { + + s.v.MinBucket = minbucket.MinBucketAggregationCaster() + + return s +} + +// A field data based single bucket aggregation, that creates a bucket of all +// documents in the current document set context that are missing a field value +// (effectively, missing a field or having the configured NULL value set). +func (s *_aggregations) Missing(missing types.MissingAggregationVariant) *_aggregations { + + s.v.Missing = missing.MissingAggregationCaster() + + return s +} + +func (s *_aggregations) MovingAvg(movingaverageaggregation types.MovingAverageAggregationVariant) *_aggregations { + + s.v.MovingAvg = *movingaverageaggregation.MovingAverageAggregationCaster() + + return s +} + +// Given an ordered series of data, "slides" a window across the data and runs a +// custom script on each window of data. +// For convenience, a number of common functions are predefined such as `min`, +// `max`, and moving averages. 
+func (s *_aggregations) MovingFn(movingfn types.MovingFunctionAggregationVariant) *_aggregations { + + s.v.MovingFn = movingfn.MovingFunctionAggregationCaster() + + return s +} + +// Given an ordered series of percentiles, "slides" a window across those +// percentiles and computes cumulative percentiles. +func (s *_aggregations) MovingPercentiles(movingpercentiles types.MovingPercentilesAggregationVariant) *_aggregations { + + s.v.MovingPercentiles = movingpercentiles.MovingPercentilesAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation where buckets are dynamically +// built - one per unique set of values. +func (s *_aggregations) MultiTerms(multiterms types.MultiTermsAggregationVariant) *_aggregations { + + s.v.MultiTerms = multiterms.MultiTermsAggregationCaster() + + return s +} + +// A special single bucket aggregation that enables aggregating nested +// documents. +func (s *_aggregations) Nested(nested types.NestedAggregationVariant) *_aggregations { + + s.v.Nested = nested.NestedAggregationCaster() + + return s +} + +// A parent pipeline aggregation which calculates the specific +// normalized/rescaled value for a specific bucket value. +func (s *_aggregations) Normalize(normalize types.NormalizeAggregationVariant) *_aggregations { + + s.v.Normalize = normalize.NormalizeAggregationCaster() + + return s +} + +// A special single bucket aggregation that selects parent documents that have +// the specified type, as defined in a `join` field. +func (s *_aggregations) Parent(parent types.ParentAggregationVariant) *_aggregations { + + s.v.Parent = parent.ParentAggregationCaster() + + return s +} + +// A multi-value metrics aggregation that calculates one or more percentile +// ranks over numeric values extracted from the aggregated documents. 
+func (s *_aggregations) PercentileRanks(percentileranks types.PercentileRanksAggregationVariant) *_aggregations { + + s.v.PercentileRanks = percentileranks.PercentileRanksAggregationCaster() + + return s +} + +// A multi-value metrics aggregation that calculates one or more percentiles +// over numeric values extracted from the aggregated documents. +func (s *_aggregations) Percentiles(percentiles types.PercentilesAggregationVariant) *_aggregations { + + s.v.Percentiles = percentiles.PercentilesAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which calculates percentiles across all buckets +// of a specified metric in a sibling aggregation. +func (s *_aggregations) PercentilesBucket(percentilesbucket types.PercentilesBucketAggregationVariant) *_aggregations { + + s.v.PercentilesBucket = percentilesbucket.PercentilesBucketAggregationCaster() + + return s +} + +// A single bucket aggregation that randomly includes documents in the +// aggregated results. +// Sampling provides significant speed improvement at the cost of accuracy. +func (s *_aggregations) RandomSampler(randomsampler types.RandomSamplerAggregationVariant) *_aggregations { + + s.v.RandomSampler = randomsampler.RandomSamplerAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of ranges - each representing a bucket. +func (s *_aggregations) Range(range_ types.RangeAggregationVariant) *_aggregations { + + s.v.Range = range_.RangeAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation which finds "rare" terms — +// terms that are at the long-tail of the distribution and are not frequent. +func (s *_aggregations) RareTerms(rareterms types.RareTermsAggregationVariant) *_aggregations { + + s.v.RareTerms = rareterms.RareTermsAggregationCaster() + + return s +} + +// Calculates a rate of documents or a field in each bucket. 
+// Can only be used inside a `date_histogram` or `composite` aggregation. +func (s *_aggregations) Rate(rate types.RateAggregationVariant) *_aggregations { + + s.v.Rate = rate.RateAggregationCaster() + + return s +} + +// A special single bucket aggregation that enables aggregating on parent +// documents from nested documents. +// Should only be defined inside a `nested` aggregation. +func (s *_aggregations) ReverseNested(reversenested types.ReverseNestedAggregationVariant) *_aggregations { + + s.v.ReverseNested = reversenested.ReverseNestedAggregationCaster() + + return s +} + +// A filtering aggregation used to limit any sub aggregations' processing to a +// sample of the top-scoring documents. +func (s *_aggregations) Sampler(sampler types.SamplerAggregationVariant) *_aggregations { + + s.v.Sampler = sampler.SamplerAggregationCaster() + + return s +} + +// A metric aggregation that uses scripts to provide a metric output. +func (s *_aggregations) ScriptedMetric(scriptedmetric types.ScriptedMetricAggregationVariant) *_aggregations { + + s.v.ScriptedMetric = scriptedmetric.ScriptedMetricAggregationCaster() + + return s +} + +// An aggregation that subtracts values in a time series from themselves at +// different time lags or periods. +func (s *_aggregations) SerialDiff(serialdiff types.SerialDifferencingAggregationVariant) *_aggregations { + + s.v.SerialDiff = serialdiff.SerialDifferencingAggregationCaster() + + return s +} + +// Returns interesting or unusual occurrences of terms in a set. +func (s *_aggregations) SignificantTerms(significantterms types.SignificantTermsAggregationVariant) *_aggregations { + + s.v.SignificantTerms = significantterms.SignificantTermsAggregationCaster() + + return s +} + +// Returns interesting or unusual occurrences of free-text terms in a set. 
+func (s *_aggregations) SignificantText(significanttext types.SignificantTextAggregationVariant) *_aggregations { + + s.v.SignificantText = significanttext.SignificantTextAggregationCaster() + + return s +} + +// A multi-value metrics aggregation that computes stats over numeric values +// extracted from the aggregated documents. +func (s *_aggregations) Stats(stats types.StatsAggregationVariant) *_aggregations { + + s.v.Stats = stats.StatsAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which calculates a variety of stats across all +// buckets of a specified metric in a sibling aggregation. +func (s *_aggregations) StatsBucket(statsbucket types.StatsBucketAggregationVariant) *_aggregations { + + s.v.StatsBucket = statsbucket.StatsBucketAggregationCaster() + + return s +} + +// A multi-value metrics aggregation that computes statistics over string values +// extracted from the aggregated documents. +func (s *_aggregations) StringStats(stringstats types.StringStatsAggregationVariant) *_aggregations { + + s.v.StringStats = stringstats.StringStatsAggregationCaster() + + return s +} + +// A single-value metrics aggregation that sums numeric values that are +// extracted from the aggregated documents. +func (s *_aggregations) Sum(sum types.SumAggregationVariant) *_aggregations { + + s.v.Sum = sum.SumAggregationCaster() + + return s +} + +// A sibling pipeline aggregation which calculates the sum of a specified metric +// across all buckets in a sibling aggregation. +func (s *_aggregations) SumBucket(sumbucket types.SumBucketAggregationVariant) *_aggregations { + + s.v.SumBucket = sumbucket.SumBucketAggregationCaster() + + return s +} + +// A metrics aggregation that performs a statistical hypothesis test in which +// the test statistic follows a Student’s t-distribution under the null +// hypothesis on numeric values extracted from the aggregated documents. 
+func (s *_aggregations) TTest(ttest types.TTestAggregationVariant) *_aggregations { + + s.v.TTest = ttest.TTestAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation where buckets are dynamically +// built - one per unique value. +func (s *_aggregations) Terms(terms types.TermsAggregationVariant) *_aggregations { + + s.v.Terms = terms.TermsAggregationCaster() + + return s +} + +// The time series aggregation queries data created using a time series index. +// This is typically data such as metrics or other data streams with a time +// component, and requires creating an index using the time series mode. +func (s *_aggregations) TimeSeries(timeseries types.TimeSeriesAggregationVariant) *_aggregations { + + s.v.TimeSeries = timeseries.TimeSeriesAggregationCaster() + + return s +} + +// A metric aggregation that returns the top matching documents per bucket. +func (s *_aggregations) TopHits(tophits types.TopHitsAggregationVariant) *_aggregations { + + s.v.TopHits = tophits.TopHitsAggregationCaster() + + return s +} + +// A metric aggregation that selects metrics from the document with the largest +// or smallest sort value. +func (s *_aggregations) TopMetrics(topmetrics types.TopMetricsAggregationVariant) *_aggregations { + + s.v.TopMetrics = topmetrics.TopMetricsAggregationCaster() + + return s +} + +// A single-value metrics aggregation that counts the number of values that are +// extracted from the aggregated documents. +func (s *_aggregations) ValueCount(valuecount types.ValueCountAggregationVariant) *_aggregations { + + s.v.ValueCount = valuecount.ValueCountAggregationCaster() + + return s +} + +// A multi-bucket aggregation similar to the histogram, except instead of +// providing an interval to use as the width of each bucket, a target number of +// buckets is provided. 
+func (s *_aggregations) VariableWidthHistogram(variablewidthhistogram types.VariableWidthHistogramAggregationVariant) *_aggregations { + + s.v.VariableWidthHistogram = variablewidthhistogram.VariableWidthHistogramAggregationCaster() + + return s +} + +// A single-value metrics aggregation that computes the weighted average of +// numeric values that are extracted from the aggregated documents. +func (s *_aggregations) WeightedAvg(weightedavg types.WeightedAverageAggregationVariant) *_aggregations { + + s.v.WeightedAvg = weightedavg.WeightedAverageAggregationCaster() + + return s +} + +func (s *_aggregations) AggregationsCaster() *types.Aggregations { + return s.v +} diff --git a/typedapi/esdsl/alias.go b/typedapi/esdsl/alias.go new file mode 100644 index 0000000000..28be0f6889 --- /dev/null +++ b/typedapi/esdsl/alias.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _alias struct { + v *types.Alias +} + +func NewAlias() *_alias { + + return &_alias{v: types.NewAlias()} + +} + +// Query used to limit documents the alias can access. +func (s *_alias) Filter(filter types.QueryVariant) *_alias { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// Value used to route indexing operations to a specific shard. +// If specified, this overwrites the `routing` value for indexing operations. +func (s *_alias) IndexRouting(routing string) *_alias { + + s.v.IndexRouting = &routing + + return s +} + +// If `true`, the alias is hidden. +// All indices for the alias must have the same `is_hidden` value. +func (s *_alias) IsHidden(ishidden bool) *_alias { + + s.v.IsHidden = &ishidden + + return s +} + +// If `true`, the index is the write index for the alias. +func (s *_alias) IsWriteIndex(iswriteindex bool) *_alias { + + s.v.IsWriteIndex = &iswriteindex + + return s +} + +// Value used to route indexing and search operations to a specific shard. +func (s *_alias) Routing(routing string) *_alias { + + s.v.Routing = &routing + + return s +} + +// Value used to route search operations to a specific shard. +// If specified, this overwrites the `routing` value for search operations. +func (s *_alias) SearchRouting(routing string) *_alias { + + s.v.SearchRouting = &routing + + return s +} + +func (s *_alias) AliasCaster() *types.Alias { + return s.v +} diff --git a/typedapi/esdsl/aliasdefinition.go b/typedapi/esdsl/aliasdefinition.go new file mode 100644 index 0000000000..f4d12a4872 --- /dev/null +++ b/typedapi/esdsl/aliasdefinition.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _aliasDefinition struct { + v *types.AliasDefinition +} + +func NewAliasDefinition() *_aliasDefinition { + + return &_aliasDefinition{v: types.NewAliasDefinition()} + +} + +// Query used to limit documents the alias can access. +func (s *_aliasDefinition) Filter(filter types.QueryVariant) *_aliasDefinition { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// Value used to route indexing operations to a specific shard. +// If specified, this overwrites the `routing` value for indexing operations. +func (s *_aliasDefinition) IndexRouting(indexrouting string) *_aliasDefinition { + + s.v.IndexRouting = &indexrouting + + return s +} + +// If `true`, the alias is hidden. +// All indices for the alias must have the same `is_hidden` value. +func (s *_aliasDefinition) IsHidden(ishidden bool) *_aliasDefinition { + + s.v.IsHidden = &ishidden + + return s +} + +// If `true`, the index is the write index for the alias. 
+func (s *_aliasDefinition) IsWriteIndex(iswriteindex bool) *_aliasDefinition { + + s.v.IsWriteIndex = &iswriteindex + + return s +} + +// Value used to route indexing and search operations to a specific shard. +func (s *_aliasDefinition) Routing(routing string) *_aliasDefinition { + + s.v.Routing = &routing + + return s +} + +// Value used to route search operations to a specific shard. +// If specified, this overwrites the `routing` value for search operations. +func (s *_aliasDefinition) SearchRouting(searchrouting string) *_aliasDefinition { + + s.v.SearchRouting = &searchrouting + + return s +} + +func (s *_aliasDefinition) AliasDefinitionCaster() *types.AliasDefinition { + return s.v +} diff --git a/typedapi/esdsl/allfield.go b/typedapi/esdsl/allfield.go new file mode 100644 index 0000000000..fd12cdf6e4 --- /dev/null +++ b/typedapi/esdsl/allfield.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _allField struct { + v *types.AllField +} + +func NewAllField(analyzer string, enabled bool, omitnorms bool, searchanalyzer string, similarity string, store bool, storetermvectoroffsets bool, storetermvectorpayloads bool, storetermvectorpositions bool, storetermvectors bool) *_allField { + + tmp := &_allField{v: types.NewAllField()} + + tmp.Analyzer(analyzer) + + tmp.Enabled(enabled) + + tmp.OmitNorms(omitnorms) + + tmp.SearchAnalyzer(searchanalyzer) + + tmp.Similarity(similarity) + + tmp.Store(store) + + tmp.StoreTermVectorOffsets(storetermvectoroffsets) + + tmp.StoreTermVectorPayloads(storetermvectorpayloads) + + tmp.StoreTermVectorPositions(storetermvectorpositions) + + tmp.StoreTermVectors(storetermvectors) + + return tmp + +} + +func (s *_allField) Analyzer(analyzer string) *_allField { + + s.v.Analyzer = analyzer + + return s +} + +func (s *_allField) Enabled(enabled bool) *_allField { + + s.v.Enabled = enabled + + return s +} + +func (s *_allField) OmitNorms(omitnorms bool) *_allField { + + s.v.OmitNorms = omitnorms + + return s +} + +func (s *_allField) SearchAnalyzer(searchanalyzer string) *_allField { + + s.v.SearchAnalyzer = searchanalyzer + + return s +} + +func (s *_allField) Similarity(similarity string) *_allField { + + s.v.Similarity = similarity + + return s +} + +func (s *_allField) Store(store bool) *_allField { + + s.v.Store = store + + return s +} + +func (s *_allField) StoreTermVectorOffsets(storetermvectoroffsets bool) *_allField { + + s.v.StoreTermVectorOffsets = storetermvectoroffsets + + return s +} + +func (s *_allField) StoreTermVectorPayloads(storetermvectorpayloads bool) *_allField { + + s.v.StoreTermVectorPayloads = storetermvectorpayloads + + return s +} + +func (s *_allField) StoreTermVectorPositions(storetermvectorpositions bool) 
*_allField { + + s.v.StoreTermVectorPositions = storetermvectorpositions + + return s +} + +func (s *_allField) StoreTermVectors(storetermvectors bool) *_allField { + + s.v.StoreTermVectors = storetermvectors + + return s +} + +func (s *_allField) AllFieldCaster() *types.AllField { + return s.v +} diff --git a/typedapi/esdsl/allocateaction.go b/typedapi/esdsl/allocateaction.go new file mode 100644 index 0000000000..bf61c2571a --- /dev/null +++ b/typedapi/esdsl/allocateaction.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _allocateAction struct { + v *types.AllocateAction +} + +func NewAllocateAction() *_allocateAction { + + return &_allocateAction{v: types.NewAllocateAction()} + +} + +func (s *_allocateAction) Exclude(exclude map[string]string) *_allocateAction { + + s.v.Exclude = exclude + return s +} + +func (s *_allocateAction) AddExclude(key string, value string) *_allocateAction { + + var tmp map[string]string + if s.v.Exclude == nil { + s.v.Exclude = make(map[string]string) + } else { + tmp = s.v.Exclude + } + + tmp[key] = value + + s.v.Exclude = tmp + return s +} + +func (s *_allocateAction) Include(include map[string]string) *_allocateAction { + + s.v.Include = include + return s +} + +func (s *_allocateAction) AddInclude(key string, value string) *_allocateAction { + + var tmp map[string]string + if s.v.Include == nil { + s.v.Include = make(map[string]string) + } else { + tmp = s.v.Include + } + + tmp[key] = value + + s.v.Include = tmp + return s +} + +func (s *_allocateAction) NumberOfReplicas(numberofreplicas int) *_allocateAction { + + s.v.NumberOfReplicas = &numberofreplicas + + return s +} + +func (s *_allocateAction) Require(require map[string]string) *_allocateAction { + + s.v.Require = require + return s +} + +func (s *_allocateAction) AddRequire(key string, value string) *_allocateAction { + + var tmp map[string]string + if s.v.Require == nil { + s.v.Require = make(map[string]string) + } else { + tmp = s.v.Require + } + + tmp[key] = value + + s.v.Require = tmp + return s +} + +func (s *_allocateAction) TotalShardsPerNode(totalshardspernode int) *_allocateAction { + + s.v.TotalShardsPerNode = &totalshardspernode + + return s +} + +func (s *_allocateAction) AllocateActionCaster() *types.AllocateAction { + return s.v +} diff --git a/typedapi/esdsl/alwayscondition.go 
b/typedapi/esdsl/alwayscondition.go new file mode 100644 index 0000000000..cfc9836b0a --- /dev/null +++ b/typedapi/esdsl/alwayscondition.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _alwaysCondition struct { + v *types.AlwaysCondition +} + +func NewAlwaysCondition() *_alwaysCondition { + + return &_alwaysCondition{v: types.NewAlwaysCondition()} + +} + +func (s *_alwaysCondition) WatcherConditionCaster() *types.WatcherCondition { + container := types.NewWatcherCondition() + + container.Always = s.v + + return container +} + +func (s *_alwaysCondition) AlwaysConditionCaster() *types.AlwaysCondition { + return s.v +} diff --git a/typedapi/esdsl/analysisconfig.go b/typedapi/esdsl/analysisconfig.go new file mode 100644 index 0000000000..c4ebd6cbec --- /dev/null +++ b/typedapi/esdsl/analysisconfig.go @@ -0,0 +1,190 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _analysisConfig struct { + v *types.AnalysisConfig +} + +func NewAnalysisConfig() *_analysisConfig { + + return &_analysisConfig{v: types.NewAnalysisConfig()} + +} + +// The size of the interval that the analysis is aggregated into, typically +// between `5m` and `1h`. This value should be either a whole number of days or +// equate to a +// whole number of buckets in one day. If the anomaly detection job uses a +// datafeed with aggregations, this value must also be divisible by the interval +// of the date histogram aggregation. +func (s *_analysisConfig) BucketSpan(duration types.DurationVariant) *_analysisConfig { + + s.v.BucketSpan = *duration.DurationCaster() + + return s +} + +// If `categorization_field_name` is specified, you can also define the analyzer +// that is used to interpret the categorization field. This property cannot be +// used at the same time as `categorization_filters`. 
The categorization +// analyzer specifies how the `categorization_field` is interpreted by the +// categorization process. The `categorization_analyzer` field can be specified +// either as a string or as an object. If it is a string, it must refer to a +// built-in analyzer or one added by another plugin. +func (s *_analysisConfig) CategorizationAnalyzer(categorizationanalyzer types.CategorizationAnalyzerVariant) *_analysisConfig { + + s.v.CategorizationAnalyzer = *categorizationanalyzer.CategorizationAnalyzerCaster() + + return s +} + +// If this property is specified, the values of the specified field will be +// categorized. The resulting categories must be used in a detector by setting +// `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword +// `mlcategory`. +func (s *_analysisConfig) CategorizationFieldName(field string) *_analysisConfig { + + s.v.CategorizationFieldName = &field + + return s +} + +// If `categorization_field_name` is specified, you can also define optional +// filters. This property expects an array of regular expressions. The +// expressions are used to filter out matching sequences from the categorization +// field values. You can use this functionality to fine tune the categorization +// by excluding sequences from consideration when categories are defined. For +// example, you can exclude SQL statements that appear in your log files. This +// property cannot be used at the same time as `categorization_analyzer`. If you +// only want to define simple regular expression filters that are applied prior +// to tokenization, setting this property is the easiest method. If you also +// want to customize the tokenizer or post-tokenization filtering, use the +// `categorization_analyzer` property instead and include the filters as +// pattern_replace character filters. The effect is exactly the same. 
+func (s *_analysisConfig) CategorizationFilters(categorizationfilters ...string) *_analysisConfig { + + for _, v := range categorizationfilters { + + s.v.CategorizationFilters = append(s.v.CategorizationFilters, v) + + } + return s +} + +// Detector configuration objects specify which data fields a job analyzes. They +// also specify which analytical functions are used. You can specify multiple +// detectors for a job. If the detectors array does not contain at least one +// detector, no analysis can occur and an error is returned. +func (s *_analysisConfig) Detectors(detectors ...types.DetectorVariant) *_analysisConfig { + + for _, v := range detectors { + + s.v.Detectors = append(s.v.Detectors, *v.DetectorCaster()) + + } + return s +} + +// A comma separated list of influencer field names. Typically these can be the +// by, over, or partition fields that are used in the detector configuration. +// You might also want to use a field name that is not specifically named in a +// detector, but is available as part of the input data. When you use multiple +// detectors, the use of influencers is recommended as it aggregates results for +// each influencer entity. +func (s *_analysisConfig) Influencers(influencers ...string) *_analysisConfig { + + for _, v := range influencers { + + s.v.Influencers = append(s.v.Influencers, v) + + } + return s +} + +// The size of the window in which to expect data that is out of time order. If +// you specify a non-zero value, it must be greater than or equal to one second. +// NOTE: Latency is applicable only when you send data by using the post data +// API. +func (s *_analysisConfig) Latency(duration types.DurationVariant) *_analysisConfig { + + s.v.Latency = *duration.DurationCaster() + + return s +} + +// Advanced configuration option. Affects the pruning of models that have not +// been updated for the given time duration. The value must be set to a multiple +// of the `bucket_span`. 
If set too low, important information may be removed +// from the model. For jobs created in 8.1 and later, the default value is the +// greater of `30d` or 20 times `bucket_span`. +func (s *_analysisConfig) ModelPruneWindow(duration types.DurationVariant) *_analysisConfig { + + s.v.ModelPruneWindow = *duration.DurationCaster() + + return s +} + +// This functionality is reserved for internal use. It is not supported for use +// in customer environments and is not subject to the support SLA of official GA +// features. If set to `true`, the analysis will automatically find correlations +// between metrics for a given by field value and report anomalies when those +// correlations cease to hold. For example, suppose CPU and memory usage on host +// A is usually highly correlated with the same metrics on host B. Perhaps this +// correlation occurs because they are running a load-balanced application. If +// you enable this property, anomalies will be reported when, for example, CPU +// usage on host A is high and the value of CPU usage on host B is low. That is +// to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU +// of host B. To use the `multivariate_by_fields` property, you must also +// specify `by_field_name` in your detector. +func (s *_analysisConfig) MultivariateByFields(multivariatebyfields bool) *_analysisConfig { + + s.v.MultivariateByFields = &multivariatebyfields + + return s +} + +// Settings related to how categorization interacts with partition fields. +func (s *_analysisConfig) PerPartitionCategorization(perpartitioncategorization types.PerPartitionCategorizationVariant) *_analysisConfig { + + s.v.PerPartitionCategorization = perpartitioncategorization.PerPartitionCategorizationCaster() + + return s +} + +// If this property is specified, the data that is fed to the job is expected to +// be pre-summarized. 
This property value is the name of the field that contains +// the count of raw data points that have been summarized. The same +// `summary_count_field_name` applies to all detectors in the job. NOTE: The +// `summary_count_field_name` property cannot be used with the `metric` +// function. +func (s *_analysisConfig) SummaryCountFieldName(field string) *_analysisConfig { + + s.v.SummaryCountFieldName = &field + + return s +} + +func (s *_analysisConfig) AnalysisConfigCaster() *types.AnalysisConfig { + return s.v +} diff --git a/typedapi/esdsl/analysislimits.go b/typedapi/esdsl/analysislimits.go new file mode 100644 index 0000000000..73f8ffe7d1 --- /dev/null +++ b/typedapi/esdsl/analysislimits.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _analysisLimits struct { + v *types.AnalysisLimits +} + +func NewAnalysisLimits() *_analysisLimits { + + return &_analysisLimits{v: types.NewAnalysisLimits()} + +} + +// The maximum number of examples stored per category in memory and in the +// results data store. If you increase this value, more examples are available, +// however it requires that you have more storage available. If you set this +// value to 0, no examples are stored. NOTE: The `categorization_examples_limit` +// applies only to analysis that uses categorization. +func (s *_analysisLimits) CategorizationExamplesLimit(categorizationexampleslimit int64) *_analysisLimits { + + s.v.CategorizationExamplesLimit = &categorizationexampleslimit + + return s +} + +// The approximate maximum amount of memory resources that are required for +// analytical processing. Once this limit is approached, data pruning becomes +// more aggressive. Upon exceeding this limit, new entities are not modeled. If +// the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and +// less than 1024mb, that value is used instead of the default. The default +// value is relatively small to ensure that high resource usage is a conscious +// decision. If you have jobs that are expected to analyze high cardinality +// fields, you will likely need to use a higher value. If you specify a number +// instead of a string, the units are assumed to be MiB. Specifying a string is +// recommended for clarity. If you specify a byte size unit of `b` or `kb` and +// the number does not equate to a discrete number of megabytes, it is rounded +// down to the closest MiB. The minimum valid value is 1 MiB. If you specify a +// value less than 1 MiB, an error occurs. 
If you specify a value for the +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +// create jobs that have `model_memory_limit` values greater than that setting +// value. +func (s *_analysisLimits) ModelMemoryLimit(bytesize types.ByteSizeVariant) *_analysisLimits { + + s.v.ModelMemoryLimit = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_analysisLimits) AnalysisLimitsCaster() *types.AnalysisLimits { + return s.v +} diff --git a/typedapi/esdsl/analysismemorylimit.go b/typedapi/esdsl/analysismemorylimit.go new file mode 100644 index 0000000000..a85faefe70 --- /dev/null +++ b/typedapi/esdsl/analysismemorylimit.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _analysisMemoryLimit struct { + v *types.AnalysisMemoryLimit +} + +func NewAnalysisMemoryLimit(modelmemorylimit string) *_analysisMemoryLimit { + + tmp := &_analysisMemoryLimit{v: types.NewAnalysisMemoryLimit()} + + tmp.ModelMemoryLimit(modelmemorylimit) + + return tmp + +} + +// Limits can be applied for the resources required to hold the mathematical +// models in memory. These limits are approximate and can be set per job. They +// do not control the memory used by other processes, for example the +// Elasticsearch Java processes. +func (s *_analysisMemoryLimit) ModelMemoryLimit(modelmemorylimit string) *_analysisMemoryLimit { + + s.v.ModelMemoryLimit = modelmemorylimit + + return s +} + +func (s *_analysisMemoryLimit) AnalysisMemoryLimitCaster() *types.AnalysisMemoryLimit { + return s.v +} diff --git a/typedapi/esdsl/analyzer.go b/typedapi/esdsl/analyzer.go new file mode 100644 index 0000000000..296c268bb3 --- /dev/null +++ b/typedapi/esdsl/analyzer.go @@ -0,0 +1,673 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _analyzer struct { + v types.Analyzer +} + +func NewAnalyzer() *_analyzer { + return &_analyzer{v: nil} +} + +func (u *_analyzer) CustomAnalyzer(customanalyzer types.CustomAnalyzerVariant) *_analyzer { + + u.v = &customanalyzer + + return u +} + +// Interface implementation for CustomAnalyzer in Analyzer union +func (u *_customAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) FingerprintAnalyzer(fingerprintanalyzer types.FingerprintAnalyzerVariant) *_analyzer { + + u.v = &fingerprintanalyzer + + return u +} + +// Interface implementation for FingerprintAnalyzer in Analyzer union +func (u *_fingerprintAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) KeywordAnalyzer(keywordanalyzer types.KeywordAnalyzerVariant) *_analyzer { + + u.v = &keywordanalyzer + + return u +} + +// Interface implementation for KeywordAnalyzer in Analyzer union +func (u *_keywordAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) NoriAnalyzer(norianalyzer types.NoriAnalyzerVariant) *_analyzer { + + u.v = &norianalyzer + + return u +} + +// Interface implementation for NoriAnalyzer in Analyzer union +func (u *_noriAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) PatternAnalyzer(patternanalyzer types.PatternAnalyzerVariant) *_analyzer { + + u.v = &patternanalyzer + + return u +} + +// Interface implementation for PatternAnalyzer in Analyzer union +func (u *_patternAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + 
return &t +} + +func (u *_analyzer) SimpleAnalyzer(simpleanalyzer types.SimpleAnalyzerVariant) *_analyzer { + + u.v = &simpleanalyzer + + return u +} + +// Interface implementation for SimpleAnalyzer in Analyzer union +func (u *_simpleAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) StandardAnalyzer(standardanalyzer types.StandardAnalyzerVariant) *_analyzer { + + u.v = &standardanalyzer + + return u +} + +// Interface implementation for StandardAnalyzer in Analyzer union +func (u *_standardAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) StopAnalyzer(stopanalyzer types.StopAnalyzerVariant) *_analyzer { + + u.v = &stopanalyzer + + return u +} + +// Interface implementation for StopAnalyzer in Analyzer union +func (u *_stopAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) WhitespaceAnalyzer(whitespaceanalyzer types.WhitespaceAnalyzerVariant) *_analyzer { + + u.v = &whitespaceanalyzer + + return u +} + +// Interface implementation for WhitespaceAnalyzer in Analyzer union +func (u *_whitespaceAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) IcuAnalyzer(icuanalyzer types.IcuAnalyzerVariant) *_analyzer { + + u.v = &icuanalyzer + + return u +} + +// Interface implementation for IcuAnalyzer in Analyzer union +func (u *_icuAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) KuromojiAnalyzer(kuromojianalyzer types.KuromojiAnalyzerVariant) *_analyzer { + + u.v = &kuromojianalyzer + + return u +} + +// Interface implementation for KuromojiAnalyzer in Analyzer union +func (u *_kuromojiAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) SnowballAnalyzer(snowballanalyzer types.SnowballAnalyzerVariant) *_analyzer { + + u.v = 
&snowballanalyzer + + return u +} + +// Interface implementation for SnowballAnalyzer in Analyzer union +func (u *_snowballAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) ArabicAnalyzer(arabicanalyzer types.ArabicAnalyzerVariant) *_analyzer { + + u.v = &arabicanalyzer + + return u +} + +// Interface implementation for ArabicAnalyzer in Analyzer union +func (u *_arabicAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) ArmenianAnalyzer(armeniananalyzer types.ArmenianAnalyzerVariant) *_analyzer { + + u.v = &armeniananalyzer + + return u +} + +// Interface implementation for ArmenianAnalyzer in Analyzer union +func (u *_armenianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) BasqueAnalyzer(basqueanalyzer types.BasqueAnalyzerVariant) *_analyzer { + + u.v = &basqueanalyzer + + return u +} + +// Interface implementation for BasqueAnalyzer in Analyzer union +func (u *_basqueAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) BengaliAnalyzer(bengalianalyzer types.BengaliAnalyzerVariant) *_analyzer { + + u.v = &bengalianalyzer + + return u +} + +// Interface implementation for BengaliAnalyzer in Analyzer union +func (u *_bengaliAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) BrazilianAnalyzer(braziliananalyzer types.BrazilianAnalyzerVariant) *_analyzer { + + u.v = &braziliananalyzer + + return u +} + +// Interface implementation for BrazilianAnalyzer in Analyzer union +func (u *_brazilianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) BulgarianAnalyzer(bulgariananalyzer types.BulgarianAnalyzerVariant) *_analyzer { + + u.v = &bulgariananalyzer + + return u +} + +// Interface implementation for BulgarianAnalyzer in 
Analyzer union +func (u *_bulgarianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) CatalanAnalyzer(catalananalyzer types.CatalanAnalyzerVariant) *_analyzer { + + u.v = &catalananalyzer + + return u +} + +// Interface implementation for CatalanAnalyzer in Analyzer union +func (u *_catalanAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) ChineseAnalyzer(chineseanalyzer types.ChineseAnalyzerVariant) *_analyzer { + + u.v = &chineseanalyzer + + return u +} + +// Interface implementation for ChineseAnalyzer in Analyzer union +func (u *_chineseAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) CjkAnalyzer(cjkanalyzer types.CjkAnalyzerVariant) *_analyzer { + + u.v = &cjkanalyzer + + return u +} + +// Interface implementation for CjkAnalyzer in Analyzer union +func (u *_cjkAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) CzechAnalyzer(czechanalyzer types.CzechAnalyzerVariant) *_analyzer { + + u.v = &czechanalyzer + + return u +} + +// Interface implementation for CzechAnalyzer in Analyzer union +func (u *_czechAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) DanishAnalyzer(danishanalyzer types.DanishAnalyzerVariant) *_analyzer { + + u.v = &danishanalyzer + + return u +} + +// Interface implementation for DanishAnalyzer in Analyzer union +func (u *_danishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) DutchAnalyzer(dutchanalyzer types.DutchAnalyzerVariant) *_analyzer { + + u.v = &dutchanalyzer + + return u +} + +// Interface implementation for DutchAnalyzer in Analyzer union +func (u *_dutchAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) 
EnglishAnalyzer(englishanalyzer types.EnglishAnalyzerVariant) *_analyzer { + + u.v = &englishanalyzer + + return u +} + +// Interface implementation for EnglishAnalyzer in Analyzer union +func (u *_englishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) EstonianAnalyzer(estoniananalyzer types.EstonianAnalyzerVariant) *_analyzer { + + u.v = &estoniananalyzer + + return u +} + +// Interface implementation for EstonianAnalyzer in Analyzer union +func (u *_estonianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) FinnishAnalyzer(finnishanalyzer types.FinnishAnalyzerVariant) *_analyzer { + + u.v = &finnishanalyzer + + return u +} + +// Interface implementation for FinnishAnalyzer in Analyzer union +func (u *_finnishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) FrenchAnalyzer(frenchanalyzer types.FrenchAnalyzerVariant) *_analyzer { + + u.v = &frenchanalyzer + + return u +} + +// Interface implementation for FrenchAnalyzer in Analyzer union +func (u *_frenchAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) GalicianAnalyzer(galiciananalyzer types.GalicianAnalyzerVariant) *_analyzer { + + u.v = &galiciananalyzer + + return u +} + +// Interface implementation for GalicianAnalyzer in Analyzer union +func (u *_galicianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) GermanAnalyzer(germananalyzer types.GermanAnalyzerVariant) *_analyzer { + + u.v = &germananalyzer + + return u +} + +// Interface implementation for GermanAnalyzer in Analyzer union +func (u *_germanAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) GreekAnalyzer(greekanalyzer types.GreekAnalyzerVariant) *_analyzer { + + u.v = &greekanalyzer + + return u +} 
+ +// Interface implementation for GreekAnalyzer in Analyzer union +func (u *_greekAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) HindiAnalyzer(hindianalyzer types.HindiAnalyzerVariant) *_analyzer { + + u.v = &hindianalyzer + + return u +} + +// Interface implementation for HindiAnalyzer in Analyzer union +func (u *_hindiAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) HungarianAnalyzer(hungariananalyzer types.HungarianAnalyzerVariant) *_analyzer { + + u.v = &hungariananalyzer + + return u +} + +// Interface implementation for HungarianAnalyzer in Analyzer union +func (u *_hungarianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) IndonesianAnalyzer(indonesiananalyzer types.IndonesianAnalyzerVariant) *_analyzer { + + u.v = &indonesiananalyzer + + return u +} + +// Interface implementation for IndonesianAnalyzer in Analyzer union +func (u *_indonesianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) IrishAnalyzer(irishanalyzer types.IrishAnalyzerVariant) *_analyzer { + + u.v = &irishanalyzer + + return u +} + +// Interface implementation for IrishAnalyzer in Analyzer union +func (u *_irishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) ItalianAnalyzer(italiananalyzer types.ItalianAnalyzerVariant) *_analyzer { + + u.v = &italiananalyzer + + return u +} + +// Interface implementation for ItalianAnalyzer in Analyzer union +func (u *_italianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) LatvianAnalyzer(latviananalyzer types.LatvianAnalyzerVariant) *_analyzer { + + u.v = &latviananalyzer + + return u +} + +// Interface implementation for LatvianAnalyzer in Analyzer union +func (u *_latvianAnalyzer) 
AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) LithuanianAnalyzer(lithuaniananalyzer types.LithuanianAnalyzerVariant) *_analyzer { + + u.v = &lithuaniananalyzer + + return u +} + +// Interface implementation for LithuanianAnalyzer in Analyzer union +func (u *_lithuanianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) NorwegianAnalyzer(norwegiananalyzer types.NorwegianAnalyzerVariant) *_analyzer { + + u.v = &norwegiananalyzer + + return u +} + +// Interface implementation for NorwegianAnalyzer in Analyzer union +func (u *_norwegianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) PersianAnalyzer(persiananalyzer types.PersianAnalyzerVariant) *_analyzer { + + u.v = &persiananalyzer + + return u +} + +// Interface implementation for PersianAnalyzer in Analyzer union +func (u *_persianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) PortugueseAnalyzer(portugueseanalyzer types.PortugueseAnalyzerVariant) *_analyzer { + + u.v = &portugueseanalyzer + + return u +} + +// Interface implementation for PortugueseAnalyzer in Analyzer union +func (u *_portugueseAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) RomanianAnalyzer(romaniananalyzer types.RomanianAnalyzerVariant) *_analyzer { + + u.v = &romaniananalyzer + + return u +} + +// Interface implementation for RomanianAnalyzer in Analyzer union +func (u *_romanianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) RussianAnalyzer(russiananalyzer types.RussianAnalyzerVariant) *_analyzer { + + u.v = &russiananalyzer + + return u +} + +// Interface implementation for RussianAnalyzer in Analyzer union +func (u *_russianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := 
types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) SerbianAnalyzer(serbiananalyzer types.SerbianAnalyzerVariant) *_analyzer { + + u.v = &serbiananalyzer + + return u +} + +// Interface implementation for SerbianAnalyzer in Analyzer union +func (u *_serbianAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) SoraniAnalyzer(soranianalyzer types.SoraniAnalyzerVariant) *_analyzer { + + u.v = &soranianalyzer + + return u +} + +// Interface implementation for SoraniAnalyzer in Analyzer union +func (u *_soraniAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) SpanishAnalyzer(spanishanalyzer types.SpanishAnalyzerVariant) *_analyzer { + + u.v = &spanishanalyzer + + return u +} + +// Interface implementation for SpanishAnalyzer in Analyzer union +func (u *_spanishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) SwedishAnalyzer(swedishanalyzer types.SwedishAnalyzerVariant) *_analyzer { + + u.v = &swedishanalyzer + + return u +} + +// Interface implementation for SwedishAnalyzer in Analyzer union +func (u *_swedishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) TurkishAnalyzer(turkishanalyzer types.TurkishAnalyzerVariant) *_analyzer { + + u.v = &turkishanalyzer + + return u +} + +// Interface implementation for TurkishAnalyzer in Analyzer union +func (u *_turkishAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) ThaiAnalyzer(thaianalyzer types.ThaiAnalyzerVariant) *_analyzer { + + u.v = &thaianalyzer + + return u +} + +// Interface implementation for ThaiAnalyzer in Analyzer union +func (u *_thaiAnalyzer) AnalyzerCaster() *types.Analyzer { + t := types.Analyzer(u.v) + return &t +} + +func (u *_analyzer) AnalyzerCaster() *types.Analyzer { + return &u.v +} diff --git 
a/typedapi/esdsl/apikeyaggregationcontainer.go b/typedapi/esdsl/apikeyaggregationcontainer.go new file mode 100644 index 0000000000..26b2531ec6 --- /dev/null +++ b/typedapi/esdsl/apikeyaggregationcontainer.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _apiKeyAggregationContainer struct { + v *types.ApiKeyAggregationContainer +} + +func NewApiKeyAggregationContainer() *_apiKeyAggregationContainer { + return &_apiKeyAggregationContainer{v: types.NewApiKeyAggregationContainer()} +} + +// AdditionalApiKeyAggregationContainerProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_apiKeyAggregationContainer) AdditionalApiKeyAggregationContainerProperty(key string, value json.RawMessage) *_apiKeyAggregationContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalApiKeyAggregationContainerProperty = tmp + return s +} + +// Sub-aggregations for this aggregation. +// Only applies to bucket aggregations. +func (s *_apiKeyAggregationContainer) Aggregations(aggregations map[string]types.ApiKeyAggregationContainer) *_apiKeyAggregationContainer { + + s.v.Aggregations = aggregations + return s +} + +func (s *_apiKeyAggregationContainer) AddAggregation(key string, value types.ApiKeyAggregationContainerVariant) *_apiKeyAggregationContainer { + + var tmp map[string]types.ApiKeyAggregationContainer + if s.v.Aggregations == nil { + s.v.Aggregations = make(map[string]types.ApiKeyAggregationContainer) + } else { + tmp = s.v.Aggregations + } + + tmp[key] = *value.ApiKeyAggregationContainerCaster() + + s.v.Aggregations = tmp + return s +} + +// A single-value metrics aggregation that calculates an approximate count of +// distinct values. +func (s *_apiKeyAggregationContainer) Cardinality(cardinality types.CardinalityAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Cardinality = cardinality.CardinalityAggregationCaster() + + return s +} + +// A multi-bucket aggregation that creates composite buckets from different +// sources. +// Unlike the other multi-bucket aggregations, you can use the `composite` +// aggregation to paginate *all* buckets from a multi-level aggregation +// efficiently. +func (s *_apiKeyAggregationContainer) Composite(composite types.CompositeAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Composite = composite.CompositeAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of date ranges - each representing a bucket. 
+func (s *_apiKeyAggregationContainer) DateRange(daterange types.DateRangeAggregationVariant) *_apiKeyAggregationContainer { + + s.v.DateRange = daterange.DateRangeAggregationCaster() + + return s +} + +// A single bucket aggregation that narrows the set of documents to those that +// match a query. +func (s *_apiKeyAggregationContainer) Filter(filter types.ApiKeyQueryContainerVariant) *_apiKeyAggregationContainer { + + s.v.Filter = filter.ApiKeyQueryContainerCaster() + + return s +} + +// A multi-bucket aggregation where each bucket contains the documents that +// match a query. +func (s *_apiKeyAggregationContainer) Filters(filters types.ApiKeyFiltersAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Filters = filters.ApiKeyFiltersAggregationCaster() + + return s +} + +func (s *_apiKeyAggregationContainer) Meta(metadata types.MetadataVariant) *_apiKeyAggregationContainer { + + s.v.Meta = *metadata.MetadataCaster() + + return s +} + +func (s *_apiKeyAggregationContainer) Missing(missing types.MissingAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Missing = missing.MissingAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of ranges - each representing a bucket. +func (s *_apiKeyAggregationContainer) Range(range_ types.RangeAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Range = range_.RangeAggregationCaster() + + return s +} + +// A multi-bucket value source based aggregation where buckets are dynamically +// built - one per unique value. +func (s *_apiKeyAggregationContainer) Terms(terms types.TermsAggregationVariant) *_apiKeyAggregationContainer { + + s.v.Terms = terms.TermsAggregationCaster() + + return s +} + +// A single-value metrics aggregation that counts the number of values that are +// extracted from the aggregated documents. 
+func (s *_apiKeyAggregationContainer) ValueCount(valuecount types.ValueCountAggregationVariant) *_apiKeyAggregationContainer { + + s.v.ValueCount = valuecount.ValueCountAggregationCaster() + + return s +} + +func (s *_apiKeyAggregationContainer) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + return s.v +} diff --git a/typedapi/esdsl/apikeyfiltersaggregation.go b/typedapi/esdsl/apikeyfiltersaggregation.go new file mode 100644 index 0000000000..ebae007003 --- /dev/null +++ b/typedapi/esdsl/apikeyfiltersaggregation.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _apiKeyFiltersAggregation struct { + v *types.ApiKeyFiltersAggregation +} + +// A multi-bucket aggregation where each bucket contains the documents that +// match a query. 
+func NewApiKeyFiltersAggregation() *_apiKeyFiltersAggregation { + + return &_apiKeyFiltersAggregation{v: types.NewApiKeyFiltersAggregation()} + +} + +// Collection of queries from which to build buckets. +func (s *_apiKeyFiltersAggregation) Filters(bucketsapikeyquerycontainer types.BucketsApiKeyQueryContainerVariant) *_apiKeyFiltersAggregation { + + s.v.Filters = *bucketsapikeyquerycontainer.BucketsApiKeyQueryContainerCaster() + + return s +} + +// By default, the named filters aggregation returns the buckets as an object. +// Set to `false` to return the buckets as an array of objects. +func (s *_apiKeyFiltersAggregation) Keyed(keyed bool) *_apiKeyFiltersAggregation { + + s.v.Keyed = &keyed + + return s +} + +// Set to `true` to add a bucket to the response which will contain all +// documents that do not match any of the given filters. +func (s *_apiKeyFiltersAggregation) OtherBucket(otherbucket bool) *_apiKeyFiltersAggregation { + + s.v.OtherBucket = &otherbucket + + return s +} + +// The key with which the other bucket is returned. +func (s *_apiKeyFiltersAggregation) OtherBucketKey(otherbucketkey string) *_apiKeyFiltersAggregation { + + s.v.OtherBucketKey = &otherbucketkey + + return s +} + +func (s *_apiKeyFiltersAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Filters = s.v + + return container +} + +func (s *_apiKeyFiltersAggregation) ApiKeyFiltersAggregationCaster() *types.ApiKeyFiltersAggregation { + return s.v +} diff --git a/typedapi/esdsl/apikeyquerycontainer.go b/typedapi/esdsl/apikeyquerycontainer.go new file mode 100644 index 0000000000..4014d09414 --- /dev/null +++ b/typedapi/esdsl/apikeyquerycontainer.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _apiKeyQueryContainer struct { + v *types.ApiKeyQueryContainer +} + +func NewApiKeyQueryContainer() *_apiKeyQueryContainer { + return &_apiKeyQueryContainer{v: types.NewApiKeyQueryContainer()} +} + +// AdditionalApiKeyQueryContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) AdditionalApiKeyQueryContainerProperty(key string, value json.RawMessage) *_apiKeyQueryContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalApiKeyQueryContainerProperty = tmp + return s +} + +// Matches documents matching boolean combinations of other queries. +func (s *_apiKeyQueryContainer) Bool(bool types.BoolQueryVariant) *_apiKeyQueryContainer { + + s.v.Bool = bool.BoolQueryCaster() + + return s +} + +// Returns documents that contain an indexed value for a field. +func (s *_apiKeyQueryContainer) Exists(exists types.ExistsQueryVariant) *_apiKeyQueryContainer { + + s.v.Exists = exists.ExistsQueryCaster() + + return s +} + +// Returns documents based on their IDs. 
+// This query uses document IDs stored in the `_id` field. +func (s *_apiKeyQueryContainer) Ids(ids types.IdsQueryVariant) *_apiKeyQueryContainer { + + s.v.Ids = ids.IdsQueryCaster() + + return s +} + +// Returns documents that match a provided text, number, date or boolean value. +// The provided text is analyzed before matching. +// Match is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) Match(key string, value types.MatchQueryVariant) *_apiKeyQueryContainer { + + tmp := make(map[string]types.MatchQuery) + + tmp[key] = *value.MatchQueryCaster() + + s.v.Match = tmp + return s +} + +// Matches all documents, giving them all a `_score` of 1.0. +func (s *_apiKeyQueryContainer) MatchAll(matchall types.MatchAllQueryVariant) *_apiKeyQueryContainer { + + s.v.MatchAll = matchall.MatchAllQueryCaster() + + return s +} + +// Returns documents that contain a specific prefix in a provided field. +// Prefix is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) Prefix(key string, value types.PrefixQueryVariant) *_apiKeyQueryContainer { + + tmp := make(map[string]types.PrefixQuery) + + tmp[key] = *value.PrefixQueryCaster() + + s.v.Prefix = tmp + return s +} + +// Returns documents that contain terms within a provided range. +// Range is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) Range(key string, value types.RangeQueryVariant) *_apiKeyQueryContainer { + + tmp := make(map[string]types.RangeQuery) + + tmp[key] = *value.RangeQueryCaster() + + s.v.Range = tmp + return s +} + +// Returns documents based on a provided query string, using a parser with a +// limited but fault-tolerant syntax. 
+func (s *_apiKeyQueryContainer) SimpleQueryString(simplequerystring types.SimpleQueryStringQueryVariant) *_apiKeyQueryContainer { + + s.v.SimpleQueryString = simplequerystring.SimpleQueryStringQueryCaster() + + return s +} + +// Returns documents that contain an exact term in a provided field. +// To return a document, the query term must exactly match the queried field's +// value, including whitespace and capitalization. +// Term is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) Term(key string, value types.TermQueryVariant) *_apiKeyQueryContainer { + + tmp := make(map[string]types.TermQuery) + + tmp[key] = *value.TermQueryCaster() + + s.v.Term = tmp + return s +} + +// Returns documents that contain one or more exact terms in a provided field. +// To return a document, one or more terms must exactly match a field value, +// including whitespace and capitalization. +func (s *_apiKeyQueryContainer) Terms(terms types.TermsQueryVariant) *_apiKeyQueryContainer { + + s.v.Terms = terms.TermsQueryCaster() + + return s +} + +// Returns documents that contain terms matching a wildcard pattern. +// Wildcard is a single key dictionnary. +// It will replace the current value on each call. +func (s *_apiKeyQueryContainer) Wildcard(key string, value types.WildcardQueryVariant) *_apiKeyQueryContainer { + + tmp := make(map[string]types.WildcardQuery) + + tmp[key] = *value.WildcardQueryCaster() + + s.v.Wildcard = tmp + return s +} + +func (s *_apiKeyQueryContainer) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + return s.v +} diff --git a/typedapi/esdsl/appendprocessor.go b/typedapi/esdsl/appendprocessor.go new file mode 100644 index 0000000000..06239da404 --- /dev/null +++ b/typedapi/esdsl/appendprocessor.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _appendProcessor struct { + v *types.AppendProcessor +} + +// Appends one or more values to an existing array if the field already exists +// and it is an array. +// Converts a scalar to an array and appends one or more values to it if the +// field exists and it is a scalar. +// Creates an array containing the provided values if the field doesn’t exist. +// Accepts a single value or an array of values. +func NewAppendProcessor() *_appendProcessor { + + return &_appendProcessor{v: types.NewAppendProcessor()} + +} + +// If `false`, the processor does not append values already present in the +// field. +func (s *_appendProcessor) AllowDuplicates(allowduplicates bool) *_appendProcessor { + + s.v.AllowDuplicates = &allowduplicates + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. 
+func (s *_appendProcessor) Description(description string) *_appendProcessor { + + s.v.Description = &description + + return s +} + +// The field to be appended to. +// Supports template snippets. +func (s *_appendProcessor) Field(field string) *_appendProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_appendProcessor) If(if_ types.ScriptVariant) *_appendProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_appendProcessor) IgnoreFailure(ignorefailure bool) *_appendProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_appendProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_appendProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_appendProcessor) Tag(tag string) *_appendProcessor { + + s.v.Tag = &tag + + return s +} + +// The value to be appended. Supports template snippets. +func (s *_appendProcessor) Value(values ...json.RawMessage) *_appendProcessor { + + s.v.Value = make([]json.RawMessage, len(values)) + s.v.Value = values + + return s +} + +func (s *_appendProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Append = s.v + + return container +} + +func (s *_appendProcessor) AppendProcessorCaster() *types.AppendProcessor { + return s.v +} diff --git a/typedapi/esdsl/applicationglobaluserprivileges.go b/typedapi/esdsl/applicationglobaluserprivileges.go new file mode 100644 index 0000000000..fbcc71da84 --- /dev/null +++ b/typedapi/esdsl/applicationglobaluserprivileges.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _applicationGlobalUserPrivileges struct { + v *types.ApplicationGlobalUserPrivileges +} + +func NewApplicationGlobalUserPrivileges(manage types.ManageUserPrivilegesVariant) *_applicationGlobalUserPrivileges { + + tmp := &_applicationGlobalUserPrivileges{v: types.NewApplicationGlobalUserPrivileges()} + + tmp.Manage(manage) + + return tmp + +} + +func (s *_applicationGlobalUserPrivileges) Manage(manage types.ManageUserPrivilegesVariant) *_applicationGlobalUserPrivileges { + + s.v.Manage = *manage.ManageUserPrivilegesCaster() + + return s +} + +func (s *_applicationGlobalUserPrivileges) ApplicationGlobalUserPrivilegesCaster() *types.ApplicationGlobalUserPrivileges { + return s.v +} diff --git a/typedapi/esdsl/applicationprivileges.go b/typedapi/esdsl/applicationprivileges.go new file mode 100644 index 0000000000..75e7d178af --- /dev/null +++ b/typedapi/esdsl/applicationprivileges.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _applicationPrivileges struct { + v *types.ApplicationPrivileges +} + +func NewApplicationPrivileges(application string) *_applicationPrivileges { + + tmp := &_applicationPrivileges{v: types.NewApplicationPrivileges()} + + tmp.Application(application) + + return tmp + +} + +// The name of the application to which this entry applies. +func (s *_applicationPrivileges) Application(application string) *_applicationPrivileges { + + s.v.Application = application + + return s +} + +// A list of strings, where each element is the name of an application privilege +// or action. +func (s *_applicationPrivileges) Privileges(privileges ...string) *_applicationPrivileges { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +// A list resources to which the privileges are applied. 
+func (s *_applicationPrivileges) Resources(resources ...string) *_applicationPrivileges { + + for _, v := range resources { + + s.v.Resources = append(s.v.Resources, v) + + } + return s +} + +func (s *_applicationPrivileges) ApplicationPrivilegesCaster() *types.ApplicationPrivileges { + return s.v +} diff --git a/typedapi/esdsl/applicationprivilegescheck.go b/typedapi/esdsl/applicationprivilegescheck.go new file mode 100644 index 0000000000..48b7f414bb --- /dev/null +++ b/typedapi/esdsl/applicationprivilegescheck.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _applicationPrivilegesCheck struct { + v *types.ApplicationPrivilegesCheck +} + +func NewApplicationPrivilegesCheck(application string) *_applicationPrivilegesCheck { + + tmp := &_applicationPrivilegesCheck{v: types.NewApplicationPrivilegesCheck()} + + tmp.Application(application) + + return tmp + +} + +// The name of the application. 
+func (s *_applicationPrivilegesCheck) Application(application string) *_applicationPrivilegesCheck { + + s.v.Application = application + + return s +} + +// A list of the privileges that you want to check for the specified resources. +// It may be either application privilege names or the names of actions that are +// granted by those privileges +func (s *_applicationPrivilegesCheck) Privileges(privileges ...string) *_applicationPrivilegesCheck { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +// A list of resource names against which the privileges should be checked. +func (s *_applicationPrivilegesCheck) Resources(resources ...string) *_applicationPrivilegesCheck { + + for _, v := range resources { + + s.v.Resources = append(s.v.Resources, v) + + } + return s +} + +func (s *_applicationPrivilegesCheck) ApplicationPrivilegesCheckCaster() *types.ApplicationPrivilegesCheck { + return s.v +} diff --git a/typedapi/esdsl/arabicanalyzer.go b/typedapi/esdsl/arabicanalyzer.go new file mode 100644 index 0000000000..61e8173f17 --- /dev/null +++ b/typedapi/esdsl/arabicanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _arabicAnalyzer struct { + v *types.ArabicAnalyzer +} + +func NewArabicAnalyzer() *_arabicAnalyzer { + + return &_arabicAnalyzer{v: types.NewArabicAnalyzer()} + +} + +func (s *_arabicAnalyzer) StemExclusion(stemexclusions ...string) *_arabicAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_arabicAnalyzer) Stopwords(stopwords ...string) *_arabicAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_arabicAnalyzer) StopwordsPath(stopwordspath string) *_arabicAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_arabicAnalyzer) ArabicAnalyzerCaster() *types.ArabicAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/armeniananalyzer.go b/typedapi/esdsl/armeniananalyzer.go new file mode 100644 index 0000000000..06b7c52d07 --- /dev/null +++ b/typedapi/esdsl/armeniananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _armenianAnalyzer struct { + v *types.ArmenianAnalyzer +} + +func NewArmenianAnalyzer() *_armenianAnalyzer { + + return &_armenianAnalyzer{v: types.NewArmenianAnalyzer()} + +} + +func (s *_armenianAnalyzer) StemExclusion(stemexclusions ...string) *_armenianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_armenianAnalyzer) Stopwords(stopwords ...string) *_armenianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_armenianAnalyzer) StopwordsPath(stopwordspath string) *_armenianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_armenianAnalyzer) ArmenianAnalyzerCaster() *types.ArmenianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/arraycomparecondition.go b/typedapi/esdsl/arraycomparecondition.go new file mode 100644 index 0000000000..a03b94361d --- /dev/null +++ b/typedapi/esdsl/arraycomparecondition.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop" +) + +type _arrayCompareCondition struct { + k string + v *types.ArrayCompareCondition +} + +func NewArrayCompareCondition(key string) *_arrayCompareCondition { + return &_arrayCompareCondition{ + k: key, + v: types.NewArrayCompareCondition(), + } +} + +func (s *_arrayCompareCondition) ArrayCompareCondition(arraycomparecondition map[conditionop.ConditionOp]types.ArrayCompareOpParams) *_arrayCompareCondition { + + s.v.ArrayCompareCondition = arraycomparecondition + return s +} + +func (s *_arrayCompareCondition) AddArrayCompareCondition(key conditionop.ConditionOp, value types.ArrayCompareOpParamsVariant) *_arrayCompareCondition { + + var tmp map[conditionop.ConditionOp]types.ArrayCompareOpParams + if s.v.ArrayCompareCondition == nil { + s.v.ArrayCompareCondition = make(map[conditionop.ConditionOp]types.ArrayCompareOpParams) + } else { + tmp = s.v.ArrayCompareCondition + } + + tmp[key] = *value.ArrayCompareOpParamsCaster() + + s.v.ArrayCompareCondition = tmp + return s +} + +func (s *_arrayCompareCondition) Path(path string) *_arrayCompareCondition { + + s.v.Path = path + + return s +} + +func (s *_arrayCompareCondition) WatcherConditionCaster() *types.WatcherCondition { + container := types.NewWatcherCondition() + container.ArrayCompare = map[string]types.ArrayCompareCondition{ + s.k: *s.v, + } + return container +} + +// NewSingleArrayCompareCondition should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleArrayCompareCondition() *_arrayCompareCondition { + return &_arrayCompareCondition{ + k: "", + v: types.NewArrayCompareCondition(), + } +} + +func (s *_arrayCompareCondition) ArrayCompareConditionCaster() *types.ArrayCompareCondition { + return s.v.ArrayCompareConditionCaster() +} diff --git a/typedapi/esdsl/arraycompareopparams.go b/typedapi/esdsl/arraycompareopparams.go new file mode 100644 index 0000000000..3561fe8fc5 --- /dev/null +++ b/typedapi/esdsl/arraycompareopparams.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier" +) + +type _arrayCompareOpParams struct { + v *types.ArrayCompareOpParams +} + +func NewArrayCompareOpParams(quantifier quantifier.Quantifier) *_arrayCompareOpParams { + + tmp := &_arrayCompareOpParams{v: types.NewArrayCompareOpParams()} + + tmp.Quantifier(quantifier) + + return tmp + +} + +func (s *_arrayCompareOpParams) Quantifier(quantifier quantifier.Quantifier) *_arrayCompareOpParams { + + s.v.Quantifier = quantifier + return s +} + +func (s *_arrayCompareOpParams) Value(fieldvalue types.FieldValueVariant) *_arrayCompareOpParams { + + s.v.Value = *fieldvalue.FieldValueCaster() + + return s +} + +func (s *_arrayCompareOpParams) ArrayCompareOpParamsCaster() *types.ArrayCompareOpParams { + return s.v +} diff --git a/typedapi/esdsl/asciifoldingtokenfilter.go b/typedapi/esdsl/asciifoldingtokenfilter.go new file mode 100644 index 0000000000..4b595e4d72 --- /dev/null +++ b/typedapi/esdsl/asciifoldingtokenfilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _asciiFoldingTokenFilter struct { + v *types.AsciiFoldingTokenFilter +} + +func NewAsciiFoldingTokenFilter() *_asciiFoldingTokenFilter { + + return &_asciiFoldingTokenFilter{v: types.NewAsciiFoldingTokenFilter()} + +} + +func (s *_asciiFoldingTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_asciiFoldingTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_asciiFoldingTokenFilter) Version(versionstring string) *_asciiFoldingTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_asciiFoldingTokenFilter) AsciiFoldingTokenFilterCaster() *types.AsciiFoldingTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/attachmentprocessor.go b/typedapi/esdsl/attachmentprocessor.go new file mode 100644 index 0000000000..ec2c62525d --- /dev/null +++ b/typedapi/esdsl/attachmentprocessor.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _attachmentProcessor struct { + v *types.AttachmentProcessor +} + +// The attachment processor lets Elasticsearch extract file attachments in +// common formats (such as PPT, XLS, and PDF) by using the Apache text +// extraction library Tika. +func NewAttachmentProcessor() *_attachmentProcessor { + + return &_attachmentProcessor{v: types.NewAttachmentProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_attachmentProcessor) Description(description string) *_attachmentProcessor { + + s.v.Description = &description + + return s +} + +// The field to get the base64 encoded field from. +func (s *_attachmentProcessor) Field(field string) *_attachmentProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_attachmentProcessor) If(if_ types.ScriptVariant) *_attachmentProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_attachmentProcessor) IgnoreFailure(ignorefailure bool) *_attachmentProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and field does not exist, the processor quietly exits without +// modifying the document. +func (s *_attachmentProcessor) IgnoreMissing(ignoremissing bool) *_attachmentProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// The number of chars being used for extraction to prevent huge fields. +// Use `-1` for no limit. 
+func (s *_attachmentProcessor) IndexedChars(indexedchars int64) *_attachmentProcessor { + + s.v.IndexedChars = &indexedchars + + return s +} + +// Field name from which you can overwrite the number of chars being used for +// extraction. +func (s *_attachmentProcessor) IndexedCharsField(field string) *_attachmentProcessor { + + s.v.IndexedCharsField = &field + + return s +} + +// Handle failures for the processor. +func (s *_attachmentProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_attachmentProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Array of properties to select to be stored. +// Can be `content`, `title`, `name`, `author`, `keywords`, `date`, +// `content_type`, `content_length`, `language`. +func (s *_attachmentProcessor) Properties(properties ...string) *_attachmentProcessor { + + for _, v := range properties { + + s.v.Properties = append(s.v.Properties, v) + + } + return s +} + +// If true, the binary field will be removed from the document +func (s *_attachmentProcessor) RemoveBinary(removebinary bool) *_attachmentProcessor { + + s.v.RemoveBinary = &removebinary + + return s +} + +// Field containing the name of the resource to decode. +// If specified, the processor passes this resource name to the underlying Tika +// library to enable Resource Name Based Detection. +func (s *_attachmentProcessor) ResourceName(resourcename string) *_attachmentProcessor { + + s.v.ResourceName = &resourcename + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_attachmentProcessor) Tag(tag string) *_attachmentProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that will hold the attachment information. 
+func (s *_attachmentProcessor) TargetField(field string) *_attachmentProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_attachmentProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Attachment = s.v + + return container +} + +func (s *_attachmentProcessor) AttachmentProcessorCaster() *types.AttachmentProcessor { + return s.v +} diff --git a/typedapi/esdsl/autodatehistogramaggregation.go b/typedapi/esdsl/autodatehistogramaggregation.go new file mode 100644 index 0000000000..c879bec7d3 --- /dev/null +++ b/typedapi/esdsl/autodatehistogramaggregation.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval" +) + +type _autoDateHistogramAggregation struct { + v *types.AutoDateHistogramAggregation +} + +// A multi-bucket aggregation similar to the date histogram, except instead of +// providing an interval to use as the width of each bucket, a target number of +// buckets is provided. +func NewAutoDateHistogramAggregation() *_autoDateHistogramAggregation { + + return &_autoDateHistogramAggregation{v: types.NewAutoDateHistogramAggregation()} + +} + +// The target number of buckets. +func (s *_autoDateHistogramAggregation) Buckets(buckets int) *_autoDateHistogramAggregation { + + s.v.Buckets = &buckets + + return s +} + +// The field on which to run the aggregation. +func (s *_autoDateHistogramAggregation) Field(field string) *_autoDateHistogramAggregation { + + s.v.Field = &field + + return s +} + +// The date format used to format `key_as_string` in the response. +// If no `format` is specified, the first date format specified in the field +// mapping is used. +func (s *_autoDateHistogramAggregation) Format(format string) *_autoDateHistogramAggregation { + + s.v.Format = &format + + return s +} + +// The minimum rounding interval. +// This can make the collection process more efficient, as the aggregation will +// not attempt to round at any interval lower than `minimum_interval`. +func (s *_autoDateHistogramAggregation) MinimumInterval(minimuminterval minimuminterval.MinimumInterval) *_autoDateHistogramAggregation { + + s.v.MinimumInterval = &minimuminterval + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_autoDateHistogramAggregation) Missing(datetime types.DateTimeVariant) *_autoDateHistogramAggregation { + + s.v.Missing = *datetime.DateTimeCaster() + + return s +} + +// Time zone specified as a ISO 8601 UTC offset. +func (s *_autoDateHistogramAggregation) Offset(offset string) *_autoDateHistogramAggregation { + + s.v.Offset = &offset + + return s +} + +func (s *_autoDateHistogramAggregation) Params(params map[string]json.RawMessage) *_autoDateHistogramAggregation { + + s.v.Params = params + return s +} + +func (s *_autoDateHistogramAggregation) AddParam(key string, value json.RawMessage) *_autoDateHistogramAggregation { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_autoDateHistogramAggregation) Script(script types.ScriptVariant) *_autoDateHistogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Time zone ID. +func (s *_autoDateHistogramAggregation) TimeZone(timezone string) *_autoDateHistogramAggregation { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_autoDateHistogramAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.AutoDateHistogram = s.v + + return container +} + +func (s *_autoDateHistogramAggregation) AutoDateHistogramAggregationCaster() *types.AutoDateHistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/autoscalingpolicy.go b/typedapi/esdsl/autoscalingpolicy.go new file mode 100644 index 0000000000..c5e0eff5f2 --- /dev/null +++ b/typedapi/esdsl/autoscalingpolicy.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _autoscalingPolicy struct { + v *types.AutoscalingPolicy +} + +func NewAutoscalingPolicy() *_autoscalingPolicy { + + return &_autoscalingPolicy{v: types.NewAutoscalingPolicy()} + +} + +// Decider settings. 
+func (s *_autoscalingPolicy) Deciders(deciders map[string]json.RawMessage) *_autoscalingPolicy { + + s.v.Deciders = deciders + return s +} + +func (s *_autoscalingPolicy) AddDecider(key string, value json.RawMessage) *_autoscalingPolicy { + + var tmp map[string]json.RawMessage + if s.v.Deciders == nil { + s.v.Deciders = make(map[string]json.RawMessage) + } else { + tmp = s.v.Deciders + } + + tmp[key] = value + + s.v.Deciders = tmp + return s +} + +func (s *_autoscalingPolicy) Roles(roles ...string) *_autoscalingPolicy { + + for _, v := range roles { + + s.v.Roles = append(s.v.Roles, v) + + } + return s +} + +func (s *_autoscalingPolicy) AutoscalingPolicyCaster() *types.AutoscalingPolicy { + return s.v +} diff --git a/typedapi/esdsl/averageaggregation.go b/typedapi/esdsl/averageaggregation.go new file mode 100644 index 0000000000..31a9cd66f8 --- /dev/null +++ b/typedapi/esdsl/averageaggregation.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _averageAggregation struct { + v *types.AverageAggregation +} + +// A single-value metrics aggregation that computes the average of numeric +// values that are extracted from the aggregated documents. +func NewAverageAggregation() *_averageAggregation { + + return &_averageAggregation{v: types.NewAverageAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_averageAggregation) Field(field string) *_averageAggregation { + + s.v.Field = &field + + return s +} + +func (s *_averageAggregation) Format(format string) *_averageAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_averageAggregation) Missing(missing types.MissingVariant) *_averageAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_averageAggregation) Script(script types.ScriptVariant) *_averageAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_averageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Avg = s.v + + return container +} + +func (s *_averageAggregation) AverageAggregationCaster() *types.AverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/averagebucketaggregation.go b/typedapi/esdsl/averagebucketaggregation.go new file mode 100644 index 0000000000..f3a7ce7ba5 --- /dev/null +++ b/typedapi/esdsl/averagebucketaggregation.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _averageBucketAggregation struct { + v *types.AverageBucketAggregation +} + +// A sibling pipeline aggregation which calculates the mean value of a specified +// metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must be a +// multi-bucket aggregation. +func NewAverageBucketAggregation() *_averageBucketAggregation { + + return &_averageBucketAggregation{v: types.NewAverageBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_averageBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_averageBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_averageBucketAggregation) Format(format string) *_averageBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_averageBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_averageBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_averageBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.AvgBucket = s.v + + return container +} + +func (s *_averageBucketAggregation) AverageBucketAggregationCaster() *types.AverageBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/azurerepository.go b/typedapi/esdsl/azurerepository.go new file mode 100644 index 0000000000..5cb03d3939 --- /dev/null +++ b/typedapi/esdsl/azurerepository.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _azureRepository struct { + v *types.AzureRepository +} + +func NewAzureRepository() *_azureRepository { + + return &_azureRepository{v: types.NewAzureRepository()} + +} + +// The repository settings. 
+func (s *_azureRepository) Settings(settings types.AzureRepositorySettingsVariant) *_azureRepository { + + s.v.Settings = settings.AzureRepositorySettingsCaster() + + return s +} + +func (s *_azureRepository) Uuid(uuid string) *_azureRepository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_azureRepository) AzureRepositoryCaster() *types.AzureRepository { + return s.v +} diff --git a/typedapi/esdsl/azurerepositorysettings.go b/typedapi/esdsl/azurerepositorysettings.go new file mode 100644 index 0000000000..fa527c5175 --- /dev/null +++ b/typedapi/esdsl/azurerepositorysettings.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _azureRepositorySettings struct { + v *types.AzureRepositorySettings +} + +func NewAzureRepositorySettings() *_azureRepositorySettings { + + return &_azureRepositorySettings{v: types.NewAzureRepositorySettings()} + +} + +// The path to the repository data within the container. +// It defaults to the root directory. 
+// +// NOTE: Don't set `base_path` when configuring a snapshot repository for +// Elastic Cloud Enterprise. +// Elastic Cloud Enterprise automatically generates the `base_path` for each +// deployment so that multiple deployments can share the same bucket. +func (s *_azureRepositorySettings) BasePath(basepath string) *_azureRepositorySettings { + + s.v.BasePath = &basepath + + return s +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. +// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. +func (s *_azureRepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_azureRepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// The name of the Azure repository client to use. +func (s *_azureRepositorySettings) Client(client string) *_azureRepositorySettings { + + s.v.Client = &client + + return s +} + +// When set to `true`, metadata files are stored in compressed format. +// This setting doesn't affect index files that are already compressed by +// default. +func (s *_azureRepositorySettings) Compress(compress bool) *_azureRepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The Azure container. +func (s *_azureRepositorySettings) Container(container string) *_azureRepositorySettings { + + s.v.Container = &container + + return s +} + +// The maxmimum batch size, between 1 and 256, used for `BlobBatch` requests. 
+// Defaults to 256 which is the maximum number supported by the Azure blob batch +// API. +func (s *_azureRepositorySettings) DeleteObjectsMaxSize(deleteobjectsmaxsize int) *_azureRepositorySettings { + + s.v.DeleteObjectsMaxSize = &deleteobjectsmaxsize + + return s +} + +// Either `primary_only` or `secondary_only`. +// Note that if you set it to `secondary_only`, it will force `readonly` to +// `true`. +func (s *_azureRepositorySettings) LocationMode(locationmode string) *_azureRepositorySettings { + + s.v.LocationMode = &locationmode + + return s +} + +// The maximum number of concurrent batch delete requests that will be submitted +// for any individual bulk delete with `BlobBatch`. +// Note that the effective number of concurrent deletes is further limited by +// the Azure client connection and event loop thread limits. +// Defaults to 10, minimum is 1, maximum is 100. +func (s *_azureRepositorySettings) MaxConcurrentBatchDeletes(maxconcurrentbatchdeletes int) *_azureRepositorySettings { + + s.v.MaxConcurrentBatchDeletes = &maxconcurrentbatchdeletes + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_azureRepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_azureRepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_azureRepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_azureRepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// If `true`, the repository is read-only. 
+// The cluster can retrieve and restore snapshots from the repository but not +// write to the repository or create snapshots in it. +// +// Only a cluster with write access can create snapshots in the repository. +// All other clusters connected to the repository should have the `readonly` +// parameter set to `true`. +// If `false`, the cluster can write to the repository and create snapshots in +// it. +// +// IMPORTANT: If you register the same snapshot repository with multiple +// clusters, only one cluster should have write access to the repository. +// Having multiple clusters write to the repository at the same time risks +// corrupting the contents of the repository. +func (s *_azureRepositorySettings) Readonly(readonly bool) *_azureRepositorySettings { + + s.v.Readonly = &readonly + + return s +} + +func (s *_azureRepositorySettings) AzureRepositorySettingsCaster() *types.AzureRepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/basqueanalyzer.go b/typedapi/esdsl/basqueanalyzer.go new file mode 100644 index 0000000000..592266dbad --- /dev/null +++ b/typedapi/esdsl/basqueanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _basqueAnalyzer struct { + v *types.BasqueAnalyzer +} + +func NewBasqueAnalyzer() *_basqueAnalyzer { + + return &_basqueAnalyzer{v: types.NewBasqueAnalyzer()} + +} + +func (s *_basqueAnalyzer) StemExclusion(stemexclusions ...string) *_basqueAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_basqueAnalyzer) Stopwords(stopwords ...string) *_basqueAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_basqueAnalyzer) StopwordsPath(stopwordspath string) *_basqueAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_basqueAnalyzer) BasqueAnalyzerCaster() *types.BasqueAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/bengalianalyzer.go b/typedapi/esdsl/bengalianalyzer.go new file mode 100644 index 0000000000..339700724e --- /dev/null +++ b/typedapi/esdsl/bengalianalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bengaliAnalyzer struct { + v *types.BengaliAnalyzer +} + +func NewBengaliAnalyzer() *_bengaliAnalyzer { + + return &_bengaliAnalyzer{v: types.NewBengaliAnalyzer()} + +} + +func (s *_bengaliAnalyzer) StemExclusion(stemexclusions ...string) *_bengaliAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_bengaliAnalyzer) Stopwords(stopwords ...string) *_bengaliAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_bengaliAnalyzer) StopwordsPath(stopwordspath string) *_bengaliAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_bengaliAnalyzer) BengaliAnalyzerCaster() *types.BengaliAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/binaryproperty.go b/typedapi/esdsl/binaryproperty.go new file mode 100644 index 0000000000..3c42aac651 --- /dev/null +++ b/typedapi/esdsl/binaryproperty.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _binaryProperty struct { + v *types.BinaryProperty +} + +func NewBinaryProperty() *_binaryProperty { + + return &_binaryProperty{v: types.NewBinaryProperty()} + +} + +func (s *_binaryProperty) CopyTo(fields ...string) *_binaryProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_binaryProperty) DocValues(docvalues bool) *_binaryProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_binaryProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_binaryProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_binaryProperty) Fields(fields map[string]types.Property) *_binaryProperty { + + s.v.Fields = fields + return s +} + +func (s *_binaryProperty) AddField(key string, value types.PropertyVariant) *_binaryProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_binaryProperty) IgnoreAbove(ignoreabove int) *_binaryProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_binaryProperty) Meta(meta map[string]string) *_binaryProperty { + + s.v.Meta = meta + return s +} + +func (s *_binaryProperty) AddMeta(key string, value string) *_binaryProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_binaryProperty) Properties(properties map[string]types.Property) *_binaryProperty { + + s.v.Properties = properties + return s +} + +func (s *_binaryProperty) AddProperty(key string, value types.PropertyVariant) *_binaryProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_binaryProperty) Store(store bool) *_binaryProperty { + + s.v.Store = &store + + return s +} + +func (s *_binaryProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_binaryProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_binaryProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_binaryProperty) BinaryPropertyCaster() *types.BinaryProperty { + return s.v +} diff --git a/typedapi/esdsl/booleanproperty.go b/typedapi/esdsl/booleanproperty.go new file mode 100644 index 0000000000..62044dfdde --- /dev/null +++ b/typedapi/esdsl/booleanproperty.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _booleanProperty struct { + v *types.BooleanProperty +} + +func NewBooleanProperty() *_booleanProperty { + + return &_booleanProperty{v: types.NewBooleanProperty()} + +} + +func (s *_booleanProperty) Boost(boost types.Float64) *_booleanProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_booleanProperty) CopyTo(fields ...string) *_booleanProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_booleanProperty) DocValues(docvalues bool) *_booleanProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_booleanProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_booleanProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_booleanProperty) Fielddata(fielddata types.NumericFielddataVariant) *_booleanProperty { + + s.v.Fielddata = fielddata.NumericFielddataCaster() + + return s +} + +func (s *_booleanProperty) Fields(fields map[string]types.Property) *_booleanProperty { + + s.v.Fields = fields + return s +} + +func (s *_booleanProperty) AddField(key string, value types.PropertyVariant) *_booleanProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + 
s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_booleanProperty) IgnoreAbove(ignoreabove int) *_booleanProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_booleanProperty) Index(index bool) *_booleanProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_booleanProperty) Meta(meta map[string]string) *_booleanProperty { + + s.v.Meta = meta + return s +} + +func (s *_booleanProperty) AddMeta(key string, value string) *_booleanProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_booleanProperty) NullValue(nullvalue bool) *_booleanProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_booleanProperty) Properties(properties map[string]types.Property) *_booleanProperty { + + s.v.Properties = properties + return s +} + +func (s *_booleanProperty) AddProperty(key string, value types.PropertyVariant) *_booleanProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_booleanProperty) Store(store bool) *_booleanProperty { + + s.v.Store = &store + + return s +} + +func (s *_booleanProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_booleanProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_booleanProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_booleanProperty) BooleanPropertyCaster() *types.BooleanProperty { + return s.v +} diff --git 
a/typedapi/esdsl/boolquery.go b/typedapi/esdsl/boolquery.go new file mode 100644 index 0000000000..cf5ea93dda --- /dev/null +++ b/typedapi/esdsl/boolquery.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _boolQuery struct { + v *types.BoolQuery +} + +// matches documents matching boolean combinations of other queries. +func NewBoolQuery() *_boolQuery { + + return &_boolQuery{v: types.NewBoolQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_boolQuery) Boost(boost float32) *_boolQuery { + + s.v.Boost = &boost + + return s +} + +// The clause (query) must appear in matching documents. +// However, unlike `must`, the score of the query will be ignored. 
+func (s *_boolQuery) Filter(filters ...types.QueryVariant) *_boolQuery { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// Specifies the number or percentage of `should` clauses returned documents +// must match. +func (s *_boolQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_boolQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// The clause (query) must appear in matching documents and will contribute to +// the score. +func (s *_boolQuery) Must(musts ...types.QueryVariant) *_boolQuery { + + s.v.Must = make([]types.Query, len(musts)) + for i, v := range musts { + s.v.Must[i] = *v.QueryCaster() + } + + return s +} + +// The clause (query) must not appear in the matching documents. +// Because scoring is ignored, a score of `0` is returned for all documents. +func (s *_boolQuery) MustNot(mustnots ...types.QueryVariant) *_boolQuery { + + s.v.MustNot = make([]types.Query, len(mustnots)) + for i, v := range mustnots { + s.v.MustNot[i] = *v.QueryCaster() + } + + return s +} + +func (s *_boolQuery) QueryName_(queryname_ string) *_boolQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// The clause (query) should appear in the matching document. 
+func (s *_boolQuery) Should(shoulds ...types.QueryVariant) *_boolQuery { + + s.v.Should = make([]types.Query, len(shoulds)) + for i, v := range shoulds { + s.v.Should[i] = *v.QueryCaster() + } + + return s +} + +func (s *_boolQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Bool = s.v + + return container +} + +func (s *_boolQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.Bool = s.v + + return container +} + +func (s *_boolQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.Bool = s.v + + return container +} + +func (s *_boolQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.Bool = s.v + + return container +} + +func (s *_boolQuery) BoolQueryCaster() *types.BoolQuery { + return s.v +} diff --git a/typedapi/esdsl/boostingquery.go b/typedapi/esdsl/boostingquery.go new file mode 100644 index 0000000000..f96cba0f09 --- /dev/null +++ b/typedapi/esdsl/boostingquery.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _boostingQuery struct { + v *types.BoostingQuery +} + +// Returns documents matching a `positive` query while reducing the relevance +// score of documents that also match a `negative` query. +func NewBoostingQuery(negative types.QueryVariant, negativeboost types.Float64, positive types.QueryVariant) *_boostingQuery { + + tmp := &_boostingQuery{v: types.NewBoostingQuery()} + + tmp.Negative(negative) + + tmp.NegativeBoost(negativeboost) + + tmp.Positive(positive) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_boostingQuery) Boost(boost float32) *_boostingQuery { + + s.v.Boost = &boost + + return s +} + +// Query used to decrease the relevance score of matching documents. +func (s *_boostingQuery) Negative(negative types.QueryVariant) *_boostingQuery { + + s.v.Negative = *negative.QueryCaster() + + return s +} + +// Floating point number between 0 and 1.0 used to decrease the relevance scores +// of documents matching the `negative` query. +func (s *_boostingQuery) NegativeBoost(negativeboost types.Float64) *_boostingQuery { + + s.v.NegativeBoost = negativeboost + + return s +} + +// Any returned documents must match this query. 
+func (s *_boostingQuery) Positive(positive types.QueryVariant) *_boostingQuery { + + s.v.Positive = *positive.QueryCaster() + + return s +} + +func (s *_boostingQuery) QueryName_(queryname_ string) *_boostingQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_boostingQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Boosting = s.v + + return container +} + +func (s *_boostingQuery) BoostingQueryCaster() *types.BoostingQuery { + return s.v +} diff --git a/typedapi/esdsl/boxplotaggregation.go b/typedapi/esdsl/boxplotaggregation.go new file mode 100644 index 0000000000..31a8af981c --- /dev/null +++ b/typedapi/esdsl/boxplotaggregation.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _boxplotAggregation struct { + v *types.BoxplotAggregation +} + +// A metrics aggregation that computes a box plot of numeric values extracted +// from the aggregated documents. 
+func NewBoxplotAggregation() *_boxplotAggregation { + + return &_boxplotAggregation{v: types.NewBoxplotAggregation()} + +} + +// Limits the maximum number of nodes used by the underlying TDigest algorithm +// to `20 * compression`, enabling control of memory usage and approximation +// error. +func (s *_boxplotAggregation) Compression(compression types.Float64) *_boxplotAggregation { + + s.v.Compression = &compression + + return s +} + +// The field on which to run the aggregation. +func (s *_boxplotAggregation) Field(field string) *_boxplotAggregation { + + s.v.Field = &field + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_boxplotAggregation) Missing(missing types.MissingVariant) *_boxplotAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_boxplotAggregation) Script(script types.ScriptVariant) *_boxplotAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_boxplotAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Boxplot = s.v + + return container +} + +func (s *_boxplotAggregation) BoxplotAggregationCaster() *types.BoxplotAggregation { + return s.v +} diff --git a/typedapi/esdsl/braziliananalyzer.go b/typedapi/esdsl/braziliananalyzer.go new file mode 100644 index 0000000000..fd924d8ce6 --- /dev/null +++ b/typedapi/esdsl/braziliananalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _brazilianAnalyzer struct { + v *types.BrazilianAnalyzer +} + +func NewBrazilianAnalyzer() *_brazilianAnalyzer { + + return &_brazilianAnalyzer{v: types.NewBrazilianAnalyzer()} + +} + +func (s *_brazilianAnalyzer) Stopwords(stopwords ...string) *_brazilianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_brazilianAnalyzer) StopwordsPath(stopwordspath string) *_brazilianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_brazilianAnalyzer) BrazilianAnalyzerCaster() *types.BrazilianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/bucketcorrelationaggregation.go b/typedapi/esdsl/bucketcorrelationaggregation.go new file mode 100644 index 0000000000..ec521098ac --- /dev/null +++ b/typedapi/esdsl/bucketcorrelationaggregation.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bucketCorrelationAggregation struct { + v *types.BucketCorrelationAggregation +} + +// A sibling pipeline aggregation which runs a correlation function on the +// configured sibling multi-bucket aggregation. +func NewBucketCorrelationAggregation(function types.BucketCorrelationFunctionVariant) *_bucketCorrelationAggregation { + + tmp := &_bucketCorrelationAggregation{v: types.NewBucketCorrelationAggregation()} + + tmp.Function(function) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_bucketCorrelationAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_bucketCorrelationAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// The correlation function to execute. 
+func (s *_bucketCorrelationAggregation) Function(function types.BucketCorrelationFunctionVariant) *_bucketCorrelationAggregation { + + s.v.Function = *function.BucketCorrelationFunctionCaster() + + return s +} + +func (s *_bucketCorrelationAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.BucketCorrelation = s.v + + return container +} + +func (s *_bucketCorrelationAggregation) BucketCorrelationAggregationCaster() *types.BucketCorrelationAggregation { + return s.v +} diff --git a/typedapi/esdsl/bucketcorrelationfunction.go b/typedapi/esdsl/bucketcorrelationfunction.go new file mode 100644 index 0000000000..305ca31d9d --- /dev/null +++ b/typedapi/esdsl/bucketcorrelationfunction.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bucketCorrelationFunction struct { + v *types.BucketCorrelationFunction +} + +func NewBucketCorrelationFunction(countcorrelation types.BucketCorrelationFunctionCountCorrelationVariant) *_bucketCorrelationFunction { + + tmp := &_bucketCorrelationFunction{v: types.NewBucketCorrelationFunction()} + + tmp.CountCorrelation(countcorrelation) + + return tmp + +} + +// The configuration to calculate a count correlation. This function is designed +// for determining the correlation of a term value and a given metric. +func (s *_bucketCorrelationFunction) CountCorrelation(countcorrelation types.BucketCorrelationFunctionCountCorrelationVariant) *_bucketCorrelationFunction { + + s.v.CountCorrelation = *countcorrelation.BucketCorrelationFunctionCountCorrelationCaster() + + return s +} + +func (s *_bucketCorrelationFunction) BucketCorrelationFunctionCaster() *types.BucketCorrelationFunction { + return s.v +} diff --git a/typedapi/esdsl/bucketcorrelationfunctioncountcorrelation.go b/typedapi/esdsl/bucketcorrelationfunctioncountcorrelation.go new file mode 100644 index 0000000000..d8662f1d21 --- /dev/null +++ b/typedapi/esdsl/bucketcorrelationfunctioncountcorrelation.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bucketCorrelationFunctionCountCorrelation struct { + v *types.BucketCorrelationFunctionCountCorrelation +} + +func NewBucketCorrelationFunctionCountCorrelation(indicator types.BucketCorrelationFunctionCountCorrelationIndicatorVariant) *_bucketCorrelationFunctionCountCorrelation { + + tmp := &_bucketCorrelationFunctionCountCorrelation{v: types.NewBucketCorrelationFunctionCountCorrelation()} + + tmp.Indicator(indicator) + + return tmp + +} + +// The indicator with which to correlate the configured `bucket_path` values. 
+func (s *_bucketCorrelationFunctionCountCorrelation) Indicator(indicator types.BucketCorrelationFunctionCountCorrelationIndicatorVariant) *_bucketCorrelationFunctionCountCorrelation { + + s.v.Indicator = *indicator.BucketCorrelationFunctionCountCorrelationIndicatorCaster() + + return s +} + +func (s *_bucketCorrelationFunctionCountCorrelation) BucketCorrelationFunctionCountCorrelationCaster() *types.BucketCorrelationFunctionCountCorrelation { + return s.v +} diff --git a/typedapi/esdsl/bucketcorrelationfunctioncountcorrelationindicator.go b/typedapi/esdsl/bucketcorrelationfunctioncountcorrelationindicator.go new file mode 100644 index 0000000000..323fce94f1 --- /dev/null +++ b/typedapi/esdsl/bucketcorrelationfunctioncountcorrelationindicator.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bucketCorrelationFunctionCountCorrelationIndicator struct { + v *types.BucketCorrelationFunctionCountCorrelationIndicator +} + +func NewBucketCorrelationFunctionCountCorrelationIndicator(doccount int) *_bucketCorrelationFunctionCountCorrelationIndicator { + + tmp := &_bucketCorrelationFunctionCountCorrelationIndicator{v: types.NewBucketCorrelationFunctionCountCorrelationIndicator()} + + tmp.DocCount(doccount) + + return tmp + +} + +// The total number of documents that initially created the expectations. It’s +// required to be greater +// than or equal to the sum of all values in the buckets_path as this is the +// originating superset of data +// to which the term values are correlated. +func (s *_bucketCorrelationFunctionCountCorrelationIndicator) DocCount(doccount int) *_bucketCorrelationFunctionCountCorrelationIndicator { + + s.v.DocCount = doccount + + return s +} + +// An array of numbers with which to correlate the configured `bucket_path` +// values. +// The length of this value must always equal the number of buckets returned by +// the `bucket_path`. +func (s *_bucketCorrelationFunctionCountCorrelationIndicator) Expectations(expectations ...types.Float64) *_bucketCorrelationFunctionCountCorrelationIndicator { + + for _, v := range expectations { + + s.v.Expectations = append(s.v.Expectations, v) + + } + return s +} + +// An array of fractions to use when averaging and calculating variance. This +// should be used if +// the pre-calculated data and the buckets_path have known gaps. The length of +// fractions, if provided, +// must equal expectations. 
+func (s *_bucketCorrelationFunctionCountCorrelationIndicator) Fractions(fractions ...types.Float64) *_bucketCorrelationFunctionCountCorrelationIndicator { + + for _, v := range fractions { + + s.v.Fractions = append(s.v.Fractions, v) + + } + return s +} + +func (s *_bucketCorrelationFunctionCountCorrelationIndicator) BucketCorrelationFunctionCountCorrelationIndicatorCaster() *types.BucketCorrelationFunctionCountCorrelationIndicator { + return s.v +} diff --git a/typedapi/esdsl/bucketksaggregation.go b/typedapi/esdsl/bucketksaggregation.go new file mode 100644 index 0000000000..f34d4cffb1 --- /dev/null +++ b/typedapi/esdsl/bucketksaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bucketKsAggregation struct { + v *types.BucketKsAggregation +} + +// A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov +// test ("K-S test") against a provided distribution and the distribution +// implied by the documents counts in the configured sibling aggregation. +func NewBucketKsAggregation() *_bucketKsAggregation { + + return &_bucketKsAggregation{v: types.NewBucketKsAggregation()} + +} + +// A list of string values indicating which K-S test alternative to calculate. +// The valid values +// are: "greater", "less", "two_sided". This parameter is key for determining +// the K-S statistic used +// when calculating the K-S test. Default value is all possible alternative +// hypotheses. +func (s *_bucketKsAggregation) Alternative(alternatives ...string) *_bucketKsAggregation { + + for _, v := range alternatives { + + s.v.Alternative = append(s.v.Alternative, v) + + } + return s +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_bucketKsAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_bucketKsAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// A list of doubles indicating the distribution of the samples with which to +// compare to the `buckets_path` results. +// In typical usage this is the overall proportion of documents in each bucket, +// which is compared with the actual +// document proportions in each bucket from the sibling aggregation counts. The +// default is to assume that overall +// documents are uniformly distributed on these buckets, which they would be if +// one used equal percentiles of a +// metric to define the bucket end points. 
+func (s *_bucketKsAggregation) Fractions(fractions ...types.Float64) *_bucketKsAggregation { + + for _, v := range fractions { + + s.v.Fractions = append(s.v.Fractions, v) + + } + return s +} + +// Indicates the sampling methodology when calculating the K-S test. Note, this +// is sampling of the returned values. +// This determines the cumulative distribution function (CDF) points used +// comparing the two samples. Default is +// `upper_tail`, which emphasizes the upper end of the CDF points. Valid options +// are: `upper_tail`, `uniform`, +// and `lower_tail`. +func (s *_bucketKsAggregation) SamplingMethod(samplingmethod string) *_bucketKsAggregation { + + s.v.SamplingMethod = &samplingmethod + + return s +} + +func (s *_bucketKsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.BucketCountKsTest = s.v + + return container +} + +func (s *_bucketKsAggregation) BucketKsAggregationCaster() *types.BucketKsAggregation { + return s.v +} diff --git a/typedapi/esdsl/bucketsapikeyquerycontainer.go b/typedapi/esdsl/bucketsapikeyquerycontainer.go new file mode 100644 index 0000000000..56c57f2c68 --- /dev/null +++ b/typedapi/esdsl/bucketsapikeyquerycontainer.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _bucketsApiKeyQueryContainer struct { + v types.BucketsApiKeyQueryContainer +} + +func NewBucketsApiKeyQueryContainer() *_bucketsApiKeyQueryContainer { + return &_bucketsApiKeyQueryContainer{v: nil} +} + +func (u *_bucketsApiKeyQueryContainer) Map(value map[string]types.ApiKeyQueryContainerVariant) *_bucketsApiKeyQueryContainer { // union map + + u.v = make(map[string]types.ApiKeyQueryContainer) + for k, v := range value { + u.v.(map[string]types.ApiKeyQueryContainer)[k] = *v.ApiKeyQueryContainerCaster() + } + + return u +} + +func (u *_bucketsApiKeyQueryContainer) ApiKeyQueryContainers(apikeyquerycontainers ...types.ApiKeyQueryContainerVariant) *_bucketsApiKeyQueryContainer { + + u.v = make([]types.ApiKeyQueryContainer, len(apikeyquerycontainers)) + for i, v := range apikeyquerycontainers { + u.v.([]types.ApiKeyQueryContainer)[i] = *v.ApiKeyQueryContainerCaster() + } + + return u +} + +func (u *_bucketsApiKeyQueryContainer) BucketsApiKeyQueryContainerCaster() *types.BucketsApiKeyQueryContainer { + return &u.v +} diff --git a/typedapi/esdsl/bucketscriptaggregation.go b/typedapi/esdsl/bucketscriptaggregation.go new file mode 100644 index 0000000000..72fa4c9109 --- /dev/null +++ b/typedapi/esdsl/bucketscriptaggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _bucketScriptAggregation struct { + v *types.BucketScriptAggregation +} + +// A parent pipeline aggregation which runs a script which can perform per +// bucket computations on metrics in the parent multi-bucket aggregation. +func NewBucketScriptAggregation() *_bucketScriptAggregation { + + return &_bucketScriptAggregation{v: types.NewBucketScriptAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_bucketScriptAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_bucketScriptAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_bucketScriptAggregation) Format(format string) *_bucketScriptAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_bucketScriptAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_bucketScriptAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The script to run for this aggregation. +func (s *_bucketScriptAggregation) Script(script types.ScriptVariant) *_bucketScriptAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_bucketScriptAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.BucketScript = s.v + + return container +} + +func (s *_bucketScriptAggregation) BucketScriptAggregationCaster() *types.BucketScriptAggregation { + return s.v +} diff --git a/typedapi/esdsl/bucketselectoraggregation.go b/typedapi/esdsl/bucketselectoraggregation.go new file mode 100644 index 0000000000..8bad91fda5 --- /dev/null +++ b/typedapi/esdsl/bucketselectoraggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _bucketSelectorAggregation struct { + v *types.BucketSelectorAggregation +} + +// A parent pipeline aggregation which runs a script to determine whether the +// current bucket will be retained in the parent multi-bucket aggregation. +func NewBucketSelectorAggregation() *_bucketSelectorAggregation { + + return &_bucketSelectorAggregation{v: types.NewBucketSelectorAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_bucketSelectorAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_bucketSelectorAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_bucketSelectorAggregation) Format(format string) *_bucketSelectorAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_bucketSelectorAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_bucketSelectorAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The script to run for this aggregation. 
+func (s *_bucketSelectorAggregation) Script(script types.ScriptVariant) *_bucketSelectorAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_bucketSelectorAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.BucketSelector = s.v + + return container +} + +func (s *_bucketSelectorAggregation) BucketSelectorAggregationCaster() *types.BucketSelectorAggregation { + return s.v +} diff --git a/typedapi/esdsl/bucketsortaggregation.go b/typedapi/esdsl/bucketsortaggregation.go new file mode 100644 index 0000000000..4a64739881 --- /dev/null +++ b/typedapi/esdsl/bucketsortaggregation.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _bucketSortAggregation struct { + v *types.BucketSortAggregation +} + +// A parent pipeline aggregation which sorts the buckets of its parent +// multi-bucket aggregation. +func NewBucketSortAggregation() *_bucketSortAggregation { + + return &_bucketSortAggregation{v: types.NewBucketSortAggregation()} + +} + +// Buckets in positions prior to `from` will be truncated. +func (s *_bucketSortAggregation) From(from int) *_bucketSortAggregation { + + s.v.From = &from + + return s +} + +// The policy to apply when gaps are found in the data. +func (s *_bucketSortAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_bucketSortAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The number of buckets to return. +// Defaults to all buckets of the parent aggregation. +func (s *_bucketSortAggregation) Size(size int) *_bucketSortAggregation { + + s.v.Size = &size + + return s +} + +// The list of fields to sort on. +func (s *_bucketSortAggregation) Sort(sorts ...types.SortCombinationsVariant) *_bucketSortAggregation { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +func (s *_bucketSortAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.BucketSort = s.v + + return container +} + +func (s *_bucketSortAggregation) BucketSortAggregationCaster() *types.BucketSortAggregation { + return s.v +} diff --git a/typedapi/esdsl/bucketspath.go b/typedapi/esdsl/bucketspath.go new file mode 100644 index 0000000000..75d8816b03 --- /dev/null +++ b/typedapi/esdsl/bucketspath.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _bucketsPath struct { + v types.BucketsPath +} + +func NewBucketsPath() *_bucketsPath { + return &_bucketsPath{v: nil} +} + +func (u *_bucketsPath) String(string string) *_bucketsPath { + + u.v = &string + + return u +} + +func (u *_bucketsPath) Strings(strings ...string) *_bucketsPath { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_bucketsPath) Map(value map[string]string) *_bucketsPath { // union map + + u.v = make(map[string]string) + for k, v := range value { + u.v.(map[string]string)[k] = v + } + + return u +} + +func (u *_bucketsPath) BucketsPathCaster() *types.BucketsPath { + return &u.v +} diff --git a/typedapi/esdsl/bucketsquery.go b/typedapi/esdsl/bucketsquery.go new file mode 100644 index 0000000000..28e6260a63 --- /dev/null +++ b/typedapi/esdsl/bucketsquery.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. 
+type _bucketsQuery struct { + v types.BucketsQuery +} + +func NewBucketsQuery() *_bucketsQuery { + return &_bucketsQuery{v: nil} +} + +func (u *_bucketsQuery) Map(value map[string]types.QueryVariant) *_bucketsQuery { // union map + + u.v = make(map[string]types.Query) + for k, v := range value { + u.v.(map[string]types.Query)[k] = *v.QueryCaster() + } + + return u +} + +func (u *_bucketsQuery) QueryContainers(querycontainers ...types.QueryVariant) *_bucketsQuery { + + u.v = make([]types.Query, len(querycontainers)) + for i, v := range querycontainers { + u.v.([]types.Query)[i] = *v.QueryCaster() + } + + return u +} + +func (u *_bucketsQuery) BucketsQueryCaster() *types.BucketsQuery { + return &u.v +} diff --git a/typedapi/esdsl/bulgariananalyzer.go b/typedapi/esdsl/bulgariananalyzer.go new file mode 100644 index 0000000000..7fef2cf341 --- /dev/null +++ b/typedapi/esdsl/bulgariananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bulgarianAnalyzer struct { + v *types.BulgarianAnalyzer +} + +func NewBulgarianAnalyzer() *_bulgarianAnalyzer { + + return &_bulgarianAnalyzer{v: types.NewBulgarianAnalyzer()} + +} + +func (s *_bulgarianAnalyzer) StemExclusion(stemexclusions ...string) *_bulgarianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_bulgarianAnalyzer) Stopwords(stopwords ...string) *_bulgarianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_bulgarianAnalyzer) StopwordsPath(stopwordspath string) *_bulgarianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_bulgarianAnalyzer) BulgarianAnalyzerCaster() *types.BulgarianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/bytenumberproperty.go b/typedapi/esdsl/bytenumberproperty.go new file mode 100644 index 0000000000..78b65a56c5 --- /dev/null +++ b/typedapi/esdsl/bytenumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _byteNumberProperty struct { + v *types.ByteNumberProperty +} + +func NewByteNumberProperty() *_byteNumberProperty { + + return &_byteNumberProperty{v: types.NewByteNumberProperty()} + +} + +func (s *_byteNumberProperty) Boost(boost types.Float64) *_byteNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_byteNumberProperty) Coerce(coerce bool) *_byteNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_byteNumberProperty) CopyTo(fields ...string) *_byteNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_byteNumberProperty) DocValues(docvalues bool) *_byteNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_byteNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_byteNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_byteNumberProperty) Fields(fields map[string]types.Property) *_byteNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_byteNumberProperty) AddField(key string, value types.PropertyVariant) *_byteNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_byteNumberProperty) IgnoreAbove(ignoreabove int) *_byteNumberProperty { + + s.v.IgnoreAbove = 
&ignoreabove + + return s +} + +func (s *_byteNumberProperty) IgnoreMalformed(ignoremalformed bool) *_byteNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_byteNumberProperty) Index(index bool) *_byteNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_byteNumberProperty) Meta(meta map[string]string) *_byteNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_byteNumberProperty) AddMeta(key string, value string) *_byteNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_byteNumberProperty) NullValue(nullvalue byte) *_byteNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_byteNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_byteNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_byteNumberProperty) Properties(properties map[string]types.Property) *_byteNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_byteNumberProperty) AddProperty(key string, value types.PropertyVariant) *_byteNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_byteNumberProperty) Script(script types.ScriptVariant) *_byteNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_byteNumberProperty) Store(store bool) *_byteNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_byteNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_byteNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by 
Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_byteNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_byteNumberProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_byteNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_byteNumberProperty { + + s.v.TimeSeriesMetric = ×eriesmetric + return s +} + +func (s *_byteNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_byteNumberProperty) ByteNumberPropertyCaster() *types.ByteNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/bytesize.go b/typedapi/esdsl/bytesize.go new file mode 100644 index 0000000000..1b5ecae3c8 --- /dev/null +++ b/typedapi/esdsl/bytesize.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _byteSize struct { + v types.ByteSize +} + +func NewByteSize() *_byteSize { + return &_byteSize{v: nil} +} + +func (u *_byteSize) Int64(int64 int64) *_byteSize { + + u.v = &int64 + + return u +} + +func (u *_byteSize) String(string string) *_byteSize { + + u.v = &string + + return u +} + +func (u *_byteSize) ByteSizeCaster() *types.ByteSize { + return &u.v +} diff --git a/typedapi/esdsl/bytesprocessor.go b/typedapi/esdsl/bytesprocessor.go new file mode 100644 index 0000000000..9ae4a4af46 --- /dev/null +++ b/typedapi/esdsl/bytesprocessor.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _bytesProcessor struct { + v *types.BytesProcessor +} + +// Converts a human readable byte value (for example `1kb`) to its value in +// bytes (for example `1024`). +// If the field is an array of strings, all members of the array will be +// converted. +// Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case +// insensitive. +// An error will occur if the field is not a supported format or resultant value +// exceeds 2^63. +func NewBytesProcessor() *_bytesProcessor { + + return &_bytesProcessor{v: types.NewBytesProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_bytesProcessor) Description(description string) *_bytesProcessor { + + s.v.Description = &description + + return s +} + +// The field to convert. +func (s *_bytesProcessor) Field(field string) *_bytesProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_bytesProcessor) If(if_ types.ScriptVariant) *_bytesProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_bytesProcessor) IgnoreFailure(ignorefailure bool) *_bytesProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_bytesProcessor) IgnoreMissing(ignoremissing bool) *_bytesProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. 
+func (s *_bytesProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_bytesProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_bytesProcessor) Tag(tag string) *_bytesProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to. +// By default, the field is updated in-place. +func (s *_bytesProcessor) TargetField(field string) *_bytesProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_bytesProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Bytes = s.v + + return container +} + +func (s *_bytesProcessor) BytesProcessorCaster() *types.BytesProcessor { + return s.v +} diff --git a/typedapi/esdsl/cachequeries.go b/typedapi/esdsl/cachequeries.go new file mode 100644 index 0000000000..80eacb3ee1 --- /dev/null +++ b/typedapi/esdsl/cachequeries.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _cacheQueries struct { + v *types.CacheQueries +} + +func NewCacheQueries(enabled bool) *_cacheQueries { + + tmp := &_cacheQueries{v: types.NewCacheQueries()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_cacheQueries) Enabled(enabled bool) *_cacheQueries { + + s.v.Enabled = enabled + + return s +} + +func (s *_cacheQueries) CacheQueriesCaster() *types.CacheQueries { + return s.v +} diff --git a/typedapi/esdsl/calendarevent.go b/typedapi/esdsl/calendarevent.go new file mode 100644 index 0000000000..cb744463eb --- /dev/null +++ b/typedapi/esdsl/calendarevent.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _calendarEvent struct { + v *types.CalendarEvent +} + +func NewCalendarEvent(description string) *_calendarEvent { + + tmp := &_calendarEvent{v: types.NewCalendarEvent()} + + tmp.Description(description) + + return tmp + +} + +// A string that uniquely identifies a calendar. +func (s *_calendarEvent) CalendarId(id string) *_calendarEvent { + + s.v.CalendarId = &id + + return s +} + +// A description of the scheduled event. +func (s *_calendarEvent) Description(description string) *_calendarEvent { + + s.v.Description = description + + return s +} + +// The timestamp for the end of the scheduled event in milliseconds since the +// epoch or ISO 8601 format. +func (s *_calendarEvent) EndTime(datetime types.DateTimeVariant) *_calendarEvent { + + s.v.EndTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_calendarEvent) EventId(id string) *_calendarEvent { + + s.v.EventId = &id + + return s +} + +// Shift time by this many seconds. For example adjust time for daylight savings +// changes +func (s *_calendarEvent) ForceTimeShift(forcetimeshift int) *_calendarEvent { + + s.v.ForceTimeShift = &forcetimeshift + + return s +} + +// When true the model will not be updated for this calendar period. +func (s *_calendarEvent) SkipModelUpdate(skipmodelupdate bool) *_calendarEvent { + + s.v.SkipModelUpdate = &skipmodelupdate + + return s +} + +// When true the model will not create results for this calendar period. +func (s *_calendarEvent) SkipResult(skipresult bool) *_calendarEvent { + + s.v.SkipResult = &skipresult + + return s +} + +// The timestamp for the beginning of the scheduled event in milliseconds since +// the epoch or ISO 8601 format. 
+func (s *_calendarEvent) StartTime(datetime types.DateTimeVariant) *_calendarEvent { + + s.v.StartTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_calendarEvent) CalendarEventCaster() *types.CalendarEvent { + return s.v +} diff --git a/typedapi/esdsl/cardinalityaggregation.go b/typedapi/esdsl/cardinalityaggregation.go new file mode 100644 index 0000000000..caf183c769 --- /dev/null +++ b/typedapi/esdsl/cardinalityaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode" +) + +type _cardinalityAggregation struct { + v *types.CardinalityAggregation +} + +// A single-value metrics aggregation that calculates an approximate count of +// distinct values. 
+func NewCardinalityAggregation() *_cardinalityAggregation { + + return &_cardinalityAggregation{v: types.NewCardinalityAggregation()} + +} + +// Mechanism by which cardinality aggregations is run. +func (s *_cardinalityAggregation) ExecutionHint(executionhint cardinalityexecutionmode.CardinalityExecutionMode) *_cardinalityAggregation { + + s.v.ExecutionHint = &executionhint + return s +} + +// The field on which to run the aggregation. +func (s *_cardinalityAggregation) Field(field string) *_cardinalityAggregation { + + s.v.Field = &field + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_cardinalityAggregation) Missing(missing types.MissingVariant) *_cardinalityAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// A unique count below which counts are expected to be close to accurate. +// This allows to trade memory for accuracy. +func (s *_cardinalityAggregation) PrecisionThreshold(precisionthreshold int) *_cardinalityAggregation { + + s.v.PrecisionThreshold = &precisionthreshold + + return s +} + +func (s *_cardinalityAggregation) Rehash(rehash bool) *_cardinalityAggregation { + + s.v.Rehash = &rehash + + return s +} + +func (s *_cardinalityAggregation) Script(script types.ScriptVariant) *_cardinalityAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_cardinalityAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Cardinality = s.v + + return container +} + +func (s *_cardinalityAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Cardinality = s.v + + return container +} + +func (s *_cardinalityAggregation) CardinalityAggregationCaster() *types.CardinalityAggregation { + return s.v +} diff --git a/typedapi/esdsl/catalananalyzer.go 
b/typedapi/esdsl/catalananalyzer.go new file mode 100644 index 0000000000..9962c38bff --- /dev/null +++ b/typedapi/esdsl/catalananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _catalanAnalyzer struct { + v *types.CatalanAnalyzer +} + +func NewCatalanAnalyzer() *_catalanAnalyzer { + + return &_catalanAnalyzer{v: types.NewCatalanAnalyzer()} + +} + +func (s *_catalanAnalyzer) StemExclusion(stemexclusions ...string) *_catalanAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_catalanAnalyzer) Stopwords(stopwords ...string) *_catalanAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_catalanAnalyzer) StopwordsPath(stopwordspath string) *_catalanAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_catalanAnalyzer) CatalanAnalyzerCaster() *types.CatalanAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/categorizationanalyzer.go b/typedapi/esdsl/categorizationanalyzer.go new file mode 100644 index 0000000000..9fa010f142 --- /dev/null +++ b/typedapi/esdsl/categorizationanalyzer.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _categorizationAnalyzer struct { + v types.CategorizationAnalyzer +} + +func NewCategorizationAnalyzer() *_categorizationAnalyzer { + return &_categorizationAnalyzer{v: nil} +} + +func (u *_categorizationAnalyzer) String(string string) *_categorizationAnalyzer { + + u.v = &string + + return u +} + +func (u *_categorizationAnalyzer) CategorizationAnalyzerDefinition(categorizationanalyzerdefinition types.CategorizationAnalyzerDefinitionVariant) *_categorizationAnalyzer { + + u.v = &categorizationanalyzerdefinition + + return u +} + +// Interface implementation for CategorizationAnalyzerDefinition in CategorizationAnalyzer union +func (u *_categorizationAnalyzerDefinition) CategorizationAnalyzerCaster() *types.CategorizationAnalyzer { + t := types.CategorizationAnalyzer(u.v) + return &t +} + +func (u *_categorizationAnalyzer) CategorizationAnalyzerCaster() *types.CategorizationAnalyzer { + return &u.v +} diff --git a/typedapi/esdsl/categorizationanalyzerdefinition.go b/typedapi/esdsl/categorizationanalyzerdefinition.go new file mode 100644 index 0000000000..02dc38e64c --- /dev/null +++ b/typedapi/esdsl/categorizationanalyzerdefinition.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _categorizationAnalyzerDefinition struct { + v *types.CategorizationAnalyzerDefinition +} + +func NewCategorizationAnalyzerDefinition() *_categorizationAnalyzerDefinition { + + return &_categorizationAnalyzerDefinition{v: types.NewCategorizationAnalyzerDefinition()} + +} + +// One or more character filters. In addition to the built-in character filters, +// other plugins can provide more character filters. If this property is not +// specified, no character filters are applied prior to categorization. If you +// are customizing some other aspect of the analyzer and you need to achieve the +// equivalent of `categorization_filters` (which are not permitted when some +// other aspect of the analyzer is customized), add them here as pattern replace +// character filters. +func (s *_categorizationAnalyzerDefinition) CharFilter(charfilters ...types.CharFilterVariant) *_categorizationAnalyzerDefinition { + + for _, v := range charfilters { + + s.v.CharFilter = append(s.v.CharFilter, *v.CharFilterCaster()) + + } + return s +} + +// One or more token filters. In addition to the built-in token filters, other +// plugins can provide more token filters. If this property is not specified, no +// token filters are applied prior to categorization. 
+func (s *_categorizationAnalyzerDefinition) Filter(filters ...types.TokenFilterVariant) *_categorizationAnalyzerDefinition { + + for _, v := range filters { + + s.v.Filter = append(s.v.Filter, *v.TokenFilterCaster()) + + } + return s +} + +// The name or definition of the tokenizer to use after character filters are +// applied. This property is compulsory if `categorization_analyzer` is +// specified as an object. Machine learning provides a tokenizer called +// `ml_standard` that tokenizes in a way that has been determined to produce +// good categorization results on a variety of log file formats for logs in +// English. If you want to use that tokenizer but change the character or token +// filters, specify "tokenizer": "ml_standard" in your +// `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is +// available, which tokenizes in the same way as the non-customizable tokenizer +// in old versions of the product (before 6.2). `ml_classic` was the default +// categorization tokenizer in versions 6.2 to 7.13, so if you need +// categorization identical to the default for jobs created in these versions, +// specify "tokenizer": "ml_classic" in your `categorization_analyzer`. +func (s *_categorizationAnalyzerDefinition) Tokenizer(tokenizer types.TokenizerVariant) *_categorizationAnalyzerDefinition { + + s.v.Tokenizer = *tokenizer.TokenizerCaster() + + return s +} + +func (s *_categorizationAnalyzerDefinition) CategorizationAnalyzerDefinitionCaster() *types.CategorizationAnalyzerDefinition { + return s.v +} diff --git a/typedapi/esdsl/categorizetextaggregation.go b/typedapi/esdsl/categorizetextaggregation.go new file mode 100644 index 0000000000..06f16397d3 --- /dev/null +++ b/typedapi/esdsl/categorizetextaggregation.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _categorizeTextAggregation struct { + v *types.CategorizeTextAggregation +} + +// A multi-bucket aggregation that groups semi-structured text into buckets. +func NewCategorizeTextAggregation() *_categorizeTextAggregation { + + return &_categorizeTextAggregation{v: types.NewCategorizeTextAggregation()} + +} + +// The categorization analyzer specifies how the text is analyzed and tokenized +// before being categorized. +// The syntax is very similar to that used to define the analyzer in the +// [Analyze +// endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). +// This property +// cannot be used at the same time as categorization_filters. +func (s *_categorizeTextAggregation) CategorizationAnalyzer(categorizetextanalyzer types.CategorizeTextAnalyzerVariant) *_categorizeTextAggregation { + + s.v.CategorizationAnalyzer = *categorizetextanalyzer.CategorizeTextAnalyzerCaster() + + return s +} + +// This property expects an array of regular expressions. The expressions are +// used to filter out matching +// sequences from the categorization field values. 
You can use this +// functionality to fine tune the categorization +// by excluding sequences from consideration when categories are defined. For +// example, you can exclude SQL +// statements that appear in your log files. This property cannot be used at the +// same time as categorization_analyzer. +// If you only want to define simple regular expression filters that are applied +// prior to tokenization, setting +// this property is the easiest method. If you also want to customize the +// tokenizer or post-tokenization filtering, +// use the categorization_analyzer property instead and include the filters as +// pattern_replace character filters. +func (s *_categorizeTextAggregation) CategorizationFilters(categorizationfilters ...string) *_categorizeTextAggregation { + + for _, v := range categorizationfilters { + + s.v.CategorizationFilters = append(s.v.CategorizationFilters, v) + + } + return s +} + +// The semi-structured text field to categorize. +func (s *_categorizeTextAggregation) Field(field string) *_categorizeTextAggregation { + + s.v.Field = field + + return s +} + +// The maximum number of token positions to match on before attempting to merge +// categories. Larger +// values will use more memory and create narrower categories. Max allowed value +// is 100. +func (s *_categorizeTextAggregation) MaxMatchedTokens(maxmatchedtokens int) *_categorizeTextAggregation { + + s.v.MaxMatchedTokens = &maxmatchedtokens + + return s +} + +// The maximum number of unique tokens at any position up to max_matched_tokens. +// Must be larger than 1. +// Smaller values use less memory and create fewer categories. Larger values +// will use more memory and +// create narrower categories. Max allowed value is 100. +func (s *_categorizeTextAggregation) MaxUniqueTokens(maxuniquetokens int) *_categorizeTextAggregation { + + s.v.MaxUniqueTokens = &maxuniquetokens + + return s +} + +// The minimum number of documents in a bucket to be returned to the results. 
+func (s *_categorizeTextAggregation) MinDocCount(mindoccount int) *_categorizeTextAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// The minimum number of documents in a bucket to be returned from the shard +// before merging. +func (s *_categorizeTextAggregation) ShardMinDocCount(shardmindoccount int) *_categorizeTextAggregation { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// The number of categorization buckets to return from each shard before merging +// all the results. +func (s *_categorizeTextAggregation) ShardSize(shardsize int) *_categorizeTextAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// The minimum percentage of tokens that must match for text to be added to the +// category bucket. Must +// be between 1 and 100. The larger the value the narrower the categories. +// Larger values will increase memory +// usage and create narrower categories. +func (s *_categorizeTextAggregation) SimilarityThreshold(similaritythreshold int) *_categorizeTextAggregation { + + s.v.SimilarityThreshold = &similaritythreshold + + return s +} + +// The number of buckets to return. +func (s *_categorizeTextAggregation) Size(size int) *_categorizeTextAggregation { + + s.v.Size = &size + + return s +} + +func (s *_categorizeTextAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.CategorizeText = s.v + + return container +} + +func (s *_categorizeTextAggregation) CategorizeTextAggregationCaster() *types.CategorizeTextAggregation { + return s.v +} diff --git a/typedapi/esdsl/categorizetextanalyzer.go b/typedapi/esdsl/categorizetextanalyzer.go new file mode 100644 index 0000000000..ac99850c8b --- /dev/null +++ b/typedapi/esdsl/categorizetextanalyzer.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _categorizeTextAnalyzer struct { + v types.CategorizeTextAnalyzer +} + +func NewCategorizeTextAnalyzer() *_categorizeTextAnalyzer { + return &_categorizeTextAnalyzer{v: nil} +} + +func (u *_categorizeTextAnalyzer) String(string string) *_categorizeTextAnalyzer { + + u.v = &string + + return u +} + +func (u *_categorizeTextAnalyzer) CustomCategorizeTextAnalyzer(customcategorizetextanalyzer types.CustomCategorizeTextAnalyzerVariant) *_categorizeTextAnalyzer { + + u.v = &customcategorizetextanalyzer + + return u +} + +// Interface implementation for CustomCategorizeTextAnalyzer in CategorizeTextAnalyzer union +func (u *_customCategorizeTextAnalyzer) CategorizeTextAnalyzerCaster() *types.CategorizeTextAnalyzer { + t := types.CategorizeTextAnalyzer(u.v) + return &t +} + +func (u *_categorizeTextAnalyzer) CategorizeTextAnalyzerCaster() *types.CategorizeTextAnalyzer { + return &u.v +} diff --git a/typedapi/esdsl/chaininput.go b/typedapi/esdsl/chaininput.go new file mode 100644 index 0000000000..04468fc403 --- /dev/null +++ b/typedapi/esdsl/chaininput.go @@ 
-0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _chainInput struct { + v *types.ChainInput +} + +func NewChainInput() *_chainInput { + + return &_chainInput{v: types.NewChainInput()} + +} + +func (s *_chainInput) Inputs(inputs []map[string]types.WatcherInput) *_chainInput { + + s.v.Inputs = inputs + + return s +} + +func (s *_chainInput) WatcherInputCaster() *types.WatcherInput { + container := types.NewWatcherInput() + + container.Chain = s.v + + return container +} + +func (s *_chainInput) ChainInputCaster() *types.ChainInput { + return s.v +} diff --git a/typedapi/esdsl/charfilter.go b/typedapi/esdsl/charfilter.go new file mode 100644 index 0000000000..ea2f8354b2 --- /dev/null +++ b/typedapi/esdsl/charfilter.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _charFilter struct { + v types.CharFilter +} + +func NewCharFilter() *_charFilter { + return &_charFilter{v: nil} +} + +func (u *_charFilter) String(string string) *_charFilter { + + u.v = &string + + return u +} + +func (u *_charFilter) CharFilterDefinition(charfilterdefinition types.CharFilterDefinitionVariant) *_charFilter { + + u.v = *charfilterdefinition.CharFilterDefinitionCaster() + + return u +} + +// Interface implementation for CharFilterDefinition in CharFilter union +func (u *_charFilterDefinition) CharFilterCaster() *types.CharFilter { + t := types.CharFilter(u.v) + return &t +} + +func (u *_charFilter) CharFilterCaster() *types.CharFilter { + return &u.v +} diff --git a/typedapi/esdsl/charfilterdefinition.go b/typedapi/esdsl/charfilterdefinition.go new file mode 100644 index 0000000000..4ebe04e008 --- /dev/null +++ b/typedapi/esdsl/charfilterdefinition.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import "github.com/elastic/go-elasticsearch/v8/typedapi/types"
+
+// This provides all the types that are part of the union.
+type _charFilterDefinition struct { + v types.CharFilterDefinition +} + +func NewCharFilterDefinition() *_charFilterDefinition { + return &_charFilterDefinition{v: nil} +} + +func (u *_charFilterDefinition) HtmlStripCharFilter(htmlstripcharfilter types.HtmlStripCharFilterVariant) *_charFilterDefinition { + + u.v = &htmlstripcharfilter + + return u +} + +// Interface implementation for HtmlStripCharFilter in CharFilterDefinition union +func (u *_htmlStripCharFilter) CharFilterDefinitionCaster() *types.CharFilterDefinition { + t := types.CharFilterDefinition(u.v) + return &t +} + +func (u *_charFilterDefinition) MappingCharFilter(mappingcharfilter types.MappingCharFilterVariant) *_charFilterDefinition { + + u.v = &mappingcharfilter + + return u +} + +// Interface implementation for MappingCharFilter in CharFilterDefinition union +func (u *_mappingCharFilter) CharFilterDefinitionCaster() *types.CharFilterDefinition { + t := types.CharFilterDefinition(u.v) + return &t +} + +func (u *_charFilterDefinition) PatternReplaceCharFilter(patternreplacecharfilter types.PatternReplaceCharFilterVariant) *_charFilterDefinition { + + u.v = &patternreplacecharfilter + + return u +} + +// Interface implementation for PatternReplaceCharFilter in CharFilterDefinition union +func (u *_patternReplaceCharFilter) CharFilterDefinitionCaster() *types.CharFilterDefinition { + t := types.CharFilterDefinition(u.v) + return &t +} + +func (u *_charFilterDefinition) IcuNormalizationCharFilter(icunormalizationcharfilter types.IcuNormalizationCharFilterVariant) *_charFilterDefinition { + + u.v = &icunormalizationcharfilter + + return u +} + +// Interface implementation for IcuNormalizationCharFilter in CharFilterDefinition union +func (u *_icuNormalizationCharFilter) CharFilterDefinitionCaster() *types.CharFilterDefinition { + t := types.CharFilterDefinition(u.v) + return &t +} + +func (u *_charFilterDefinition) KuromojiIterationMarkCharFilter(kuromojiiterationmarkcharfilter 
types.KuromojiIterationMarkCharFilterVariant) *_charFilterDefinition { + + u.v = &kuromojiiterationmarkcharfilter + + return u +} + +// Interface implementation for KuromojiIterationMarkCharFilter in CharFilterDefinition union +func (u *_kuromojiIterationMarkCharFilter) CharFilterDefinitionCaster() *types.CharFilterDefinition { + t := types.CharFilterDefinition(u.v) + return &t +} + +func (u *_charFilterDefinition) CharFilterDefinitionCaster() *types.CharFilterDefinition { + return &u.v +} diff --git a/typedapi/esdsl/chargrouptokenizer.go b/typedapi/esdsl/chargrouptokenizer.go new file mode 100644 index 0000000000..64b78bbcfe --- /dev/null +++ b/typedapi/esdsl/chargrouptokenizer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _charGroupTokenizer struct { + v *types.CharGroupTokenizer +} + +func NewCharGroupTokenizer() *_charGroupTokenizer { + + return &_charGroupTokenizer{v: types.NewCharGroupTokenizer()} + +} + +func (s *_charGroupTokenizer) MaxTokenLength(maxtokenlength int) *_charGroupTokenizer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +func (s *_charGroupTokenizer) TokenizeOnChars(tokenizeonchars ...string) *_charGroupTokenizer { + + for _, v := range tokenizeonchars { + + s.v.TokenizeOnChars = append(s.v.TokenizeOnChars, v) + + } + return s +} + +func (s *_charGroupTokenizer) Version(versionstring string) *_charGroupTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_charGroupTokenizer) CharGroupTokenizerCaster() *types.CharGroupTokenizer { + return s.v +} diff --git a/typedapi/esdsl/childrenaggregation.go b/typedapi/esdsl/childrenaggregation.go new file mode 100644 index 0000000000..971bf7ce43 --- /dev/null +++ b/typedapi/esdsl/childrenaggregation.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _childrenAggregation struct { + v *types.ChildrenAggregation +} + +// A single bucket aggregation that selects child documents that have the +// specified type, as defined in a `join` field. +func NewChildrenAggregation() *_childrenAggregation { + + return &_childrenAggregation{v: types.NewChildrenAggregation()} + +} + +// The child type that should be selected. +func (s *_childrenAggregation) Type(relationname string) *_childrenAggregation { + + s.v.Type = &relationname + + return s +} + +func (s *_childrenAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Children = s.v + + return container +} + +func (s *_childrenAggregation) ChildrenAggregationCaster() *types.ChildrenAggregation { + return s.v +} diff --git a/typedapi/esdsl/chineseanalyzer.go b/typedapi/esdsl/chineseanalyzer.go new file mode 100644 index 0000000000..f8fdef9cbe --- /dev/null +++ b/typedapi/esdsl/chineseanalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _chineseAnalyzer struct { + v *types.ChineseAnalyzer +} + +func NewChineseAnalyzer() *_chineseAnalyzer { + + return &_chineseAnalyzer{v: types.NewChineseAnalyzer()} + +} + +func (s *_chineseAnalyzer) Stopwords(stopwords ...string) *_chineseAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_chineseAnalyzer) StopwordsPath(stopwordspath string) *_chineseAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_chineseAnalyzer) ChineseAnalyzerCaster() *types.ChineseAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/chisquareheuristic.go b/typedapi/esdsl/chisquareheuristic.go new file mode 100644 index 0000000000..af4038250c --- /dev/null +++ b/typedapi/esdsl/chisquareheuristic.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _chiSquareHeuristic struct { + v *types.ChiSquareHeuristic +} + +func NewChiSquareHeuristic(backgroundissuperset bool, includenegatives bool) *_chiSquareHeuristic { + + tmp := &_chiSquareHeuristic{v: types.NewChiSquareHeuristic()} + + tmp.BackgroundIsSuperset(backgroundissuperset) + + tmp.IncludeNegatives(includenegatives) + + return tmp + +} + +// Set to `false` if you defined a custom background filter that represents a +// different set of documents that you want to compare to. +func (s *_chiSquareHeuristic) BackgroundIsSuperset(backgroundissuperset bool) *_chiSquareHeuristic { + + s.v.BackgroundIsSuperset = backgroundissuperset + + return s +} + +// Set to `false` to filter out the terms that appear less often in the subset +// than in documents outside the subset. +func (s *_chiSquareHeuristic) IncludeNegatives(includenegatives bool) *_chiSquareHeuristic { + + s.v.IncludeNegatives = includenegatives + + return s +} + +func (s *_chiSquareHeuristic) ChiSquareHeuristicCaster() *types.ChiSquareHeuristic { + return s.v +} diff --git a/typedapi/esdsl/chunkingconfig.go b/typedapi/esdsl/chunkingconfig.go new file mode 100644 index 0000000000..c8195fbdd8 --- /dev/null +++ b/typedapi/esdsl/chunkingconfig.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode" +) + +type _chunkingConfig struct { + v *types.ChunkingConfig +} + +func NewChunkingConfig(mode chunkingmode.ChunkingMode) *_chunkingConfig { + + tmp := &_chunkingConfig{v: types.NewChunkingConfig()} + + tmp.Mode(mode) + + return tmp + +} + +// If the mode is `auto`, the chunk size is dynamically calculated; +// this is the recommended value when the datafeed does not use aggregations. +// If the mode is `manual`, chunking is applied according to the specified +// `time_span`; +// use this mode when the datafeed uses aggregations. If the mode is `off`, no +// chunking is applied. +func (s *_chunkingConfig) Mode(mode chunkingmode.ChunkingMode) *_chunkingConfig { + + s.v.Mode = mode + return s +} + +// The time span that each search will be querying. This setting is applicable +// only when the `mode` is set to `manual`. 
+func (s *_chunkingConfig) TimeSpan(duration types.DurationVariant) *_chunkingConfig { + + s.v.TimeSpan = *duration.DurationCaster() + + return s +} + +func (s *_chunkingConfig) ChunkingConfigCaster() *types.ChunkingConfig { + return s.v +} diff --git a/typedapi/esdsl/circleprocessor.go b/typedapi/esdsl/circleprocessor.go new file mode 100644 index 0000000000..c27ba3c3a1 --- /dev/null +++ b/typedapi/esdsl/circleprocessor.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype" +) + +type _circleProcessor struct { + v *types.CircleProcessor +} + +// Converts circle definitions of shapes to regular polygons which approximate +// them. 
+func NewCircleProcessor(errordistance types.Float64, shapetype shapetype.ShapeType) *_circleProcessor { + + tmp := &_circleProcessor{v: types.NewCircleProcessor()} + + tmp.ErrorDistance(errordistance) + + tmp.ShapeType(shapetype) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_circleProcessor) Description(description string) *_circleProcessor { + + s.v.Description = &description + + return s +} + +// The difference between the resulting inscribed distance from center to side +// and the circle’s radius (measured in meters for `geo_shape`, unit-less for +// `shape`). +func (s *_circleProcessor) ErrorDistance(errordistance types.Float64) *_circleProcessor { + + s.v.ErrorDistance = errordistance + + return s +} + +// The field to interpret as a circle. Either a string in WKT format or a map +// for GeoJSON. +func (s *_circleProcessor) Field(field string) *_circleProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_circleProcessor) If(if_ types.ScriptVariant) *_circleProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_circleProcessor) IgnoreFailure(ignorefailure bool) *_circleProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_circleProcessor) IgnoreMissing(ignoremissing bool) *_circleProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_circleProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_circleProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Which field mapping type is to be used when processing the circle: +// `geo_shape` or `shape`. 
+func (s *_circleProcessor) ShapeType(shapetype shapetype.ShapeType) *_circleProcessor { + + s.v.ShapeType = shapetype + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_circleProcessor) Tag(tag string) *_circleProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the polygon shape to +// By default, the field is updated in-place. +func (s *_circleProcessor) TargetField(field string) *_circleProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_circleProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Circle = s.v + + return container +} + +func (s *_circleProcessor) CircleProcessorCaster() *types.CircleProcessor { + return s.v +} diff --git a/typedapi/esdsl/cjkanalyzer.go b/typedapi/esdsl/cjkanalyzer.go new file mode 100644 index 0000000000..c93d2d4a04 --- /dev/null +++ b/typedapi/esdsl/cjkanalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _cjkAnalyzer struct { + v *types.CjkAnalyzer +} + +func NewCjkAnalyzer() *_cjkAnalyzer { + + return &_cjkAnalyzer{v: types.NewCjkAnalyzer()} + +} + +func (s *_cjkAnalyzer) Stopwords(stopwords ...string) *_cjkAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_cjkAnalyzer) StopwordsPath(stopwordspath string) *_cjkAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_cjkAnalyzer) CjkAnalyzerCaster() *types.CjkAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/classictokenizer.go b/typedapi/esdsl/classictokenizer.go new file mode 100644 index 0000000000..98dced9193 --- /dev/null +++ b/typedapi/esdsl/classictokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _classicTokenizer struct { + v *types.ClassicTokenizer +} + +func NewClassicTokenizer() *_classicTokenizer { + + return &_classicTokenizer{v: types.NewClassicTokenizer()} + +} + +func (s *_classicTokenizer) MaxTokenLength(maxtokenlength int) *_classicTokenizer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +func (s *_classicTokenizer) Version(versionstring string) *_classicTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_classicTokenizer) ClassicTokenizerCaster() *types.ClassicTokenizer { + return s.v +} diff --git a/typedapi/esdsl/classificationinferenceoptions.go b/typedapi/esdsl/classificationinferenceoptions.go new file mode 100644 index 0000000000..96eb6bfb26 --- /dev/null +++ b/typedapi/esdsl/classificationinferenceoptions.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _classificationInferenceOptions struct { + v *types.ClassificationInferenceOptions +} + +// Classification configuration for inference. +func NewClassificationInferenceOptions() *_classificationInferenceOptions { + + return &_classificationInferenceOptions{v: types.NewClassificationInferenceOptions()} + +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_classificationInferenceOptions) NumTopClasses(numtopclasses int) *_classificationInferenceOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// Specifies the maximum number of feature importance values per document. +func (s *_classificationInferenceOptions) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_classificationInferenceOptions { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// Specifies the type of the predicted field to write. Acceptable values are: +// string, number, boolean. When boolean is provided 1.0 is transformed to true +// and 0.0 to false. +func (s *_classificationInferenceOptions) PredictionFieldType(predictionfieldtype string) *_classificationInferenceOptions { + + s.v.PredictionFieldType = &predictionfieldtype + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. +func (s *_classificationInferenceOptions) ResultsField(resultsfield string) *_classificationInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// Specifies the field to which the top classes are written. Defaults to +// top_classes. 
+func (s *_classificationInferenceOptions) TopClassesResultsField(topclassesresultsfield string) *_classificationInferenceOptions { + + s.v.TopClassesResultsField = &topclassesresultsfield + + return s +} + +func (s *_classificationInferenceOptions) InferenceConfigContainerCaster() *types.InferenceConfigContainer { + container := types.NewInferenceConfigContainer() + + container.Classification = s.v + + return container +} + +func (s *_classificationInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.Classification = s.v + + return container +} + +func (s *_classificationInferenceOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.Classification = s.v + + return container +} + +func (s *_classificationInferenceOptions) ClassificationInferenceOptionsCaster() *types.ClassificationInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/combinedfieldsquery.go b/typedapi/esdsl/combinedfieldsquery.go new file mode 100644 index 0000000000..30bb7a497d --- /dev/null +++ b/typedapi/esdsl/combinedfieldsquery.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms" +) + +type _combinedFieldsQuery struct { + v *types.CombinedFieldsQuery +} + +// The `combined_fields` query supports searching multiple text fields as if +// their contents had been indexed into one combined field. +func NewCombinedFieldsQuery(query string) *_combinedFieldsQuery { + + tmp := &_combinedFieldsQuery{v: types.NewCombinedFieldsQuery()} + + tmp.Query(query) + + return tmp + +} + +// If true, match phrase queries are automatically created for multi-term +// synonyms. +func (s *_combinedFieldsQuery) AutoGenerateSynonymsPhraseQuery(autogeneratesynonymsphrasequery bool) *_combinedFieldsQuery { + + s.v.AutoGenerateSynonymsPhraseQuery = &autogeneratesynonymsphrasequery + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_combinedFieldsQuery) Boost(boost float32) *_combinedFieldsQuery { + + s.v.Boost = &boost + + return s +} + +// List of fields to search. Field wildcard patterns are allowed. Only `text` +// fields are supported, and they must all have the same search `analyzer`. 
+func (s *_combinedFieldsQuery) Fields(fields ...string) *_combinedFieldsQuery { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, v) + + } + return s +} + +// Minimum number of clauses that must match for a document to be returned. +func (s *_combinedFieldsQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_combinedFieldsQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Boolean logic used to interpret text in the query value. +func (s *_combinedFieldsQuery) Operator(operator combinedfieldsoperator.CombinedFieldsOperator) *_combinedFieldsQuery { + + s.v.Operator = &operator + return s +} + +// Text to search for in the provided `fields`. +// The `combined_fields` query analyzes the provided text before performing a +// search. +func (s *_combinedFieldsQuery) Query(query string) *_combinedFieldsQuery { + + s.v.Query = query + + return s +} + +func (s *_combinedFieldsQuery) QueryName_(queryname_ string) *_combinedFieldsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates whether no documents are returned if the analyzer removes all +// tokens, such as when using a `stop` filter. +func (s *_combinedFieldsQuery) ZeroTermsQuery(zerotermsquery combinedfieldszeroterms.CombinedFieldsZeroTerms) *_combinedFieldsQuery { + + s.v.ZeroTermsQuery = &zerotermsquery + return s +} + +func (s *_combinedFieldsQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.CombinedFields = s.v + + return container +} + +func (s *_combinedFieldsQuery) CombinedFieldsQueryCaster() *types.CombinedFieldsQuery { + return s.v +} diff --git a/typedapi/esdsl/command.go b/typedapi/esdsl/command.go new file mode 100644 index 0000000000..b766744be8 --- /dev/null +++ b/typedapi/esdsl/command.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _command struct { + v *types.Command +} + +func NewCommand() *_command { + + return &_command{v: types.NewCommand()} + +} + +// Allocate an empty primary shard to a node. Accepts the index and shard for +// index name and shard number, and node to allocate the shard to. Using this +// command leads to a complete loss of all data that was indexed into this +// shard, if it was previously started. If a node which has a copy of the data +// rejoins the cluster later on, that data will be deleted. To ensure that these +// implications are well-understood, this command requires the flag +// accept_data_loss to be explicitly set to true. +func (s *_command) AllocateEmptyPrimary(allocateemptyprimary types.CommandAllocatePrimaryActionVariant) *_command { + + s.v.AllocateEmptyPrimary = allocateemptyprimary.CommandAllocatePrimaryActionCaster() + + return s +} + +// Allocate an unassigned replica shard to a node. Accepts index and shard for +// index name and shard number, and node to allocate the shard to. 
Takes +// allocation deciders into account. +func (s *_command) AllocateReplica(allocatereplica types.CommandAllocateReplicaActionVariant) *_command { + + s.v.AllocateReplica = allocatereplica.CommandAllocateReplicaActionCaster() + + return s +} + +// Allocate a primary shard to a node that holds a stale copy. Accepts the index +// and shard for index name and shard number, and node to allocate the shard to. +// Using this command may lead to data loss for the provided shard id. If a node +// which has the good copy of the data rejoins the cluster later on, that data +// will be deleted or overwritten with the data of the stale copy that was +// forcefully allocated with this command. To ensure that these implications are +// well-understood, this command requires the flag accept_data_loss to be +// explicitly set to true. +func (s *_command) AllocateStalePrimary(allocatestaleprimary types.CommandAllocatePrimaryActionVariant) *_command { + + s.v.AllocateStalePrimary = allocatestaleprimary.CommandAllocatePrimaryActionCaster() + + return s +} + +// Cancel allocation of a shard (or recovery). Accepts index and shard for index +// name and shard number, and node for the node to cancel the shard allocation +// on. This can be used to force resynchronization of existing replicas from the +// primary shard by cancelling them and allowing them to be reinitialized +// through the standard recovery process. By default only replica shard +// allocations can be cancelled. If it is necessary to cancel the allocation of +// a primary shard then the allow_primary flag must also be included in the +// request. +func (s *_command) Cancel(cancel types.CommandCancelActionVariant) *_command { + + s.v.Cancel = cancel.CommandCancelActionCaster() + + return s +} + +// Move a started shard from one node to another node. Accepts index and shard +// for index name and shard number, from_node for the node to move the shard +// from, and to_node for the node to move the shard to. 
+func (s *_command) Move(move types.CommandMoveActionVariant) *_command { + + s.v.Move = move.CommandMoveActionCaster() + + return s +} + +func (s *_command) CommandCaster() *types.Command { + return s.v +} diff --git a/typedapi/esdsl/commandallocateprimaryaction.go b/typedapi/esdsl/commandallocateprimaryaction.go new file mode 100644 index 0000000000..3018c6515d --- /dev/null +++ b/typedapi/esdsl/commandallocateprimaryaction.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _commandAllocatePrimaryAction struct { + v *types.CommandAllocatePrimaryAction +} + +func NewCommandAllocatePrimaryAction(acceptdataloss bool, node string, shard int) *_commandAllocatePrimaryAction { + + tmp := &_commandAllocatePrimaryAction{v: types.NewCommandAllocatePrimaryAction()} + + tmp.AcceptDataLoss(acceptdataloss) + + tmp.Node(node) + + tmp.Shard(shard) + + return tmp + +} + +// If a node which has a copy of the data rejoins the cluster later on, that +// data will be deleted. To ensure that these implications are well-understood, +// this command requires the flag accept_data_loss to be explicitly set to true +func (s *_commandAllocatePrimaryAction) AcceptDataLoss(acceptdataloss bool) *_commandAllocatePrimaryAction { + + s.v.AcceptDataLoss = acceptdataloss + + return s +} + +func (s *_commandAllocatePrimaryAction) Index(indexname string) *_commandAllocatePrimaryAction { + + s.v.Index = indexname + + return s +} + +func (s *_commandAllocatePrimaryAction) Node(node string) *_commandAllocatePrimaryAction { + + s.v.Node = node + + return s +} + +func (s *_commandAllocatePrimaryAction) Shard(shard int) *_commandAllocatePrimaryAction { + + s.v.Shard = shard + + return s +} + +func (s *_commandAllocatePrimaryAction) CommandAllocatePrimaryActionCaster() *types.CommandAllocatePrimaryAction { + return s.v +} diff --git a/typedapi/esdsl/commandallocatereplicaaction.go b/typedapi/esdsl/commandallocatereplicaaction.go new file mode 100644 index 0000000000..deb8ed1342 --- /dev/null +++ b/typedapi/esdsl/commandallocatereplicaaction.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _commandAllocateReplicaAction struct { + v *types.CommandAllocateReplicaAction +} + +func NewCommandAllocateReplicaAction(node string, shard int) *_commandAllocateReplicaAction { + + tmp := &_commandAllocateReplicaAction{v: types.NewCommandAllocateReplicaAction()} + + tmp.Node(node) + + tmp.Shard(shard) + + return tmp + +} + +func (s *_commandAllocateReplicaAction) Index(indexname string) *_commandAllocateReplicaAction { + + s.v.Index = indexname + + return s +} + +func (s *_commandAllocateReplicaAction) Node(node string) *_commandAllocateReplicaAction { + + s.v.Node = node + + return s +} + +func (s *_commandAllocateReplicaAction) Shard(shard int) *_commandAllocateReplicaAction { + + s.v.Shard = shard + + return s +} + +func (s *_commandAllocateReplicaAction) CommandAllocateReplicaActionCaster() *types.CommandAllocateReplicaAction { + return s.v +} diff --git a/typedapi/esdsl/commandcancelaction.go b/typedapi/esdsl/commandcancelaction.go new file mode 100644 index 0000000000..b1d352f1ce --- /dev/null +++ b/typedapi/esdsl/commandcancelaction.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _commandCancelAction struct { + v *types.CommandCancelAction +} + +func NewCommandCancelAction(node string, shard int) *_commandCancelAction { + + tmp := &_commandCancelAction{v: types.NewCommandCancelAction()} + + tmp.Node(node) + + tmp.Shard(shard) + + return tmp + +} + +func (s *_commandCancelAction) AllowPrimary(allowprimary bool) *_commandCancelAction { + + s.v.AllowPrimary = &allowprimary + + return s +} + +func (s *_commandCancelAction) Index(indexname string) *_commandCancelAction { + + s.v.Index = indexname + + return s +} + +func (s *_commandCancelAction) Node(node string) *_commandCancelAction { + + s.v.Node = node + + return s +} + +func (s *_commandCancelAction) Shard(shard int) *_commandCancelAction { + + s.v.Shard = shard + + return s +} + +func (s *_commandCancelAction) CommandCancelActionCaster() *types.CommandCancelAction { + return s.v +} diff --git a/typedapi/esdsl/commandmoveaction.go b/typedapi/esdsl/commandmoveaction.go new 
file mode 100644 index 0000000000..4dae1ced20 --- /dev/null +++ b/typedapi/esdsl/commandmoveaction.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _commandMoveAction struct { + v *types.CommandMoveAction +} + +func NewCommandMoveAction(fromnode string, shard int, tonode string) *_commandMoveAction { + + tmp := &_commandMoveAction{v: types.NewCommandMoveAction()} + + tmp.FromNode(fromnode) + + tmp.Shard(shard) + + tmp.ToNode(tonode) + + return tmp + +} + +// The node to move the shard from +func (s *_commandMoveAction) FromNode(fromnode string) *_commandMoveAction { + + s.v.FromNode = fromnode + + return s +} + +func (s *_commandMoveAction) Index(indexname string) *_commandMoveAction { + + s.v.Index = indexname + + return s +} + +func (s *_commandMoveAction) Shard(shard int) *_commandMoveAction { + + s.v.Shard = shard + + return s +} + +// The node to move the shard to +func (s *_commandMoveAction) ToNode(tonode string) *_commandMoveAction { + 
+ s.v.ToNode = tonode + + return s +} + +func (s *_commandMoveAction) CommandMoveActionCaster() *types.CommandMoveAction { + return s.v +} diff --git a/typedapi/esdsl/commongramstokenfilter.go b/typedapi/esdsl/commongramstokenfilter.go new file mode 100644 index 0000000000..bc1fcbad3e --- /dev/null +++ b/typedapi/esdsl/commongramstokenfilter.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _commonGramsTokenFilter struct { + v *types.CommonGramsTokenFilter +} + +func NewCommonGramsTokenFilter() *_commonGramsTokenFilter { + + return &_commonGramsTokenFilter{v: types.NewCommonGramsTokenFilter()} + +} + +func (s *_commonGramsTokenFilter) CommonWords(commonwords ...string) *_commonGramsTokenFilter { + + for _, v := range commonwords { + + s.v.CommonWords = append(s.v.CommonWords, v) + + } + return s +} + +func (s *_commonGramsTokenFilter) CommonWordsPath(commonwordspath string) *_commonGramsTokenFilter { + + s.v.CommonWordsPath = &commonwordspath + + return s +} + +func (s *_commonGramsTokenFilter) IgnoreCase(ignorecase bool) *_commonGramsTokenFilter { + + s.v.IgnoreCase = &ignorecase + + return s +} + +func (s *_commonGramsTokenFilter) QueryMode(querymode bool) *_commonGramsTokenFilter { + + s.v.QueryMode = &querymode + + return s +} + +func (s *_commonGramsTokenFilter) Version(versionstring string) *_commonGramsTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_commonGramsTokenFilter) CommonGramsTokenFilterCaster() *types.CommonGramsTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/commontermsquery.go b/typedapi/esdsl/commontermsquery.go new file mode 100644 index 0000000000..3262d680b6 --- /dev/null +++ b/typedapi/esdsl/commontermsquery.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" +) + +type _commonTermsQuery struct { + k string + v *types.CommonTermsQuery +} + +func NewCommonTermsQuery(field string, query string) *_commonTermsQuery { + tmp := &_commonTermsQuery{ + k: field, + v: types.NewCommonTermsQuery(), + } + + tmp.Query(query) + return tmp +} + +func (s *_commonTermsQuery) Analyzer(analyzer string) *_commonTermsQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_commonTermsQuery) Boost(boost float32) *_commonTermsQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_commonTermsQuery) CutoffFrequency(cutofffrequency types.Float64) *_commonTermsQuery { + + s.v.CutoffFrequency = &cutofffrequency + + return s +} + +func (s *_commonTermsQuery) HighFreqOperator(highfreqoperator operator.Operator) *_commonTermsQuery { + + s.v.HighFreqOperator = &highfreqoperator + return s +} + +func (s *_commonTermsQuery) LowFreqOperator(lowfreqoperator operator.Operator) *_commonTermsQuery { + + s.v.LowFreqOperator = &lowfreqoperator + return s +} + +func (s *_commonTermsQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_commonTermsQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +func (s *_commonTermsQuery) Query(query string) *_commonTermsQuery { + + s.v.Query = query + + return s +} + +func (s *_commonTermsQuery) QueryName_(queryname_ string) *_commonTermsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_commonTermsQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Common = map[string]types.CommonTermsQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleCommonTermsQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleCommonTermsQuery() *_commonTermsQuery { + return &_commonTermsQuery{ + k: "", + v: types.NewCommonTermsQuery(), + } +} + +func (s *_commonTermsQuery) CommonTermsQueryCaster() *types.CommonTermsQuery { + return s.v.CommonTermsQueryCaster() +} diff --git a/typedapi/esdsl/communityidprocessor.go b/typedapi/esdsl/communityidprocessor.go new file mode 100644 index 0000000000..6d0666f1b8 --- /dev/null +++ b/typedapi/esdsl/communityidprocessor.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _communityIDProcessor struct { + v *types.CommunityIDProcessor +} + +// Computes the Community ID for network flow data as defined in the +// Community ID Specification. You can use a community ID to correlate network +// events related to a single flow. +func NewCommunityIDProcessor() *_communityIDProcessor { + + return &_communityIDProcessor{v: types.NewCommunityIDProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_communityIDProcessor) Description(description string) *_communityIDProcessor { + + s.v.Description = &description + + return s +} + +// Field containing the destination IP address. +func (s *_communityIDProcessor) DestinationIp(field string) *_communityIDProcessor { + + s.v.DestinationIp = &field + + return s +} + +// Field containing the destination port. 
+func (s *_communityIDProcessor) DestinationPort(field string) *_communityIDProcessor { + + s.v.DestinationPort = &field + + return s +} + +// Field containing the IANA number. +func (s *_communityIDProcessor) IanaNumber(field string) *_communityIDProcessor { + + s.v.IanaNumber = &field + + return s +} + +// Field containing the ICMP code. +func (s *_communityIDProcessor) IcmpCode(field string) *_communityIDProcessor { + + s.v.IcmpCode = &field + + return s +} + +// Field containing the ICMP type. +func (s *_communityIDProcessor) IcmpType(field string) *_communityIDProcessor { + + s.v.IcmpType = &field + + return s +} + +// Conditionally execute the processor. +func (s *_communityIDProcessor) If(if_ types.ScriptVariant) *_communityIDProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_communityIDProcessor) IgnoreFailure(ignorefailure bool) *_communityIDProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If true and any required fields are missing, the processor quietly exits +// without modifying the document. +func (s *_communityIDProcessor) IgnoreMissing(ignoremissing bool) *_communityIDProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_communityIDProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_communityIDProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The +// seed can prevent hash collisions between network domains, such as a staging +// and production network that use the same addressing scheme. +func (s *_communityIDProcessor) Seed(seed int) *_communityIDProcessor { + + s.v.Seed = &seed + + return s +} + +// Field containing the source IP address. 
+func (s *_communityIDProcessor) SourceIp(field string) *_communityIDProcessor { + + s.v.SourceIp = &field + + return s +} + +// Field containing the source port. +func (s *_communityIDProcessor) SourcePort(field string) *_communityIDProcessor { + + s.v.SourcePort = &field + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_communityIDProcessor) Tag(tag string) *_communityIDProcessor { + + s.v.Tag = &tag + + return s +} + +// Output field for the community ID. +func (s *_communityIDProcessor) TargetField(field string) *_communityIDProcessor { + + s.v.TargetField = &field + + return s +} + +// Field containing the transport protocol name or number. Used only when the +// iana_number field is not present. The following protocol names are currently +// supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, +// udp +func (s *_communityIDProcessor) Transport(field string) *_communityIDProcessor { + + s.v.Transport = &field + + return s +} + +func (s *_communityIDProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.CommunityId = s.v + + return container +} + +func (s *_communityIDProcessor) CommunityIDProcessorCaster() *types.CommunityIDProcessor { + return s.v +} diff --git a/typedapi/esdsl/completioncontext.go b/typedapi/esdsl/completioncontext.go new file mode 100644 index 0000000000..15e461554d --- /dev/null +++ b/typedapi/esdsl/completioncontext.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _completionContext struct { + v *types.CompletionContext +} + +func NewCompletionContext() *_completionContext { + + return &_completionContext{v: types.NewCompletionContext()} + +} + +// The factor by which the score of the suggestion should be boosted. +// The score is computed by multiplying the boost with the suggestion weight. +func (s *_completionContext) Boost(boost types.Float64) *_completionContext { + + s.v.Boost = &boost + + return s +} + +// The value of the category to filter/boost on. +func (s *_completionContext) Context(context types.ContextVariant) *_completionContext { + + s.v.Context = *context.ContextCaster() + + return s +} + +// An array of precision values at which neighboring geohashes should be taken +// into account. +// Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash +// precision (`1`..`12`). +// Defaults to generating neighbors for index time precision level. +func (s *_completionContext) Neighbours(neighbours ...types.GeoHashPrecisionVariant) *_completionContext { + + for _, v := range neighbours { + + s.v.Neighbours = append(s.v.Neighbours, *v.GeoHashPrecisionCaster()) + + } + return s +} + +// The precision of the geohash to encode the query geo point. 
+// Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw +// geohash precision (`1`..`12`). +// Defaults to index time precision level. +func (s *_completionContext) Precision(geohashprecision types.GeoHashPrecisionVariant) *_completionContext { + + s.v.Precision = *geohashprecision.GeoHashPrecisionCaster() + + return s +} + +// Whether the category value should be treated as a prefix or not. +func (s *_completionContext) Prefix(prefix bool) *_completionContext { + + s.v.Prefix = &prefix + + return s +} + +func (s *_completionContext) CompletionContextCaster() *types.CompletionContext { + return s.v +} diff --git a/typedapi/esdsl/completionproperty.go b/typedapi/esdsl/completionproperty.go new file mode 100644 index 0000000000..d0bbeb8843 --- /dev/null +++ b/typedapi/esdsl/completionproperty.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _completionProperty struct { + v *types.CompletionProperty +} + +func NewCompletionProperty() *_completionProperty { + + return &_completionProperty{v: types.NewCompletionProperty()} + +} + +func (s *_completionProperty) Analyzer(analyzer string) *_completionProperty { + + s.v.Analyzer = &analyzer + + return s +} + +func (s *_completionProperty) Contexts(contexts ...types.SuggestContextVariant) *_completionProperty { + + for _, v := range contexts { + + s.v.Contexts = append(s.v.Contexts, *v.SuggestContextCaster()) + + } + return s +} + +func (s *_completionProperty) CopyTo(fields ...string) *_completionProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_completionProperty) DocValues(docvalues bool) *_completionProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_completionProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_completionProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_completionProperty) Fields(fields map[string]types.Property) *_completionProperty { + + s.v.Fields = fields + return s +} + +func (s *_completionProperty) AddField(key string, value types.PropertyVariant) *_completionProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_completionProperty) IgnoreAbove(ignoreabove int) *_completionProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_completionProperty) MaxInputLength(maxinputlength int) *_completionProperty { + + 
s.v.MaxInputLength = &maxinputlength + + return s +} + +// Metadata about the field. +func (s *_completionProperty) Meta(meta map[string]string) *_completionProperty { + + s.v.Meta = meta + return s +} + +func (s *_completionProperty) AddMeta(key string, value string) *_completionProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_completionProperty) PreservePositionIncrements(preservepositionincrements bool) *_completionProperty { + + s.v.PreservePositionIncrements = &preservepositionincrements + + return s +} + +func (s *_completionProperty) PreserveSeparators(preserveseparators bool) *_completionProperty { + + s.v.PreserveSeparators = &preserveseparators + + return s +} + +func (s *_completionProperty) Properties(properties map[string]types.Property) *_completionProperty { + + s.v.Properties = properties + return s +} + +func (s *_completionProperty) AddProperty(key string, value types.PropertyVariant) *_completionProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_completionProperty) SearchAnalyzer(searchanalyzer string) *_completionProperty { + + s.v.SearchAnalyzer = &searchanalyzer + + return s +} + +func (s *_completionProperty) Store(store bool) *_completionProperty { + + s.v.Store = &store + + return s +} + +func (s *_completionProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_completionProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_completionProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s 
*_completionProperty) CompletionPropertyCaster() *types.CompletionProperty { + return s.v +} diff --git a/typedapi/esdsl/completionsuggester.go b/typedapi/esdsl/completionsuggester.go new file mode 100644 index 0000000000..6cbad7d6fc --- /dev/null +++ b/typedapi/esdsl/completionsuggester.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _completionSuggester struct { + v *types.CompletionSuggester +} + +// Provides auto-complete/search-as-you-type functionality. +func NewCompletionSuggester() *_completionSuggester { + + return &_completionSuggester{v: types.NewCompletionSuggester()} + +} + +// The analyzer to analyze the suggest text with. +// Defaults to the search analyzer of the suggest field. +func (s *_completionSuggester) Analyzer(analyzer string) *_completionSuggester { + + s.v.Analyzer = &analyzer + + return s +} + +// A value, geo point object, or a geo hash string to filter or boost the +// suggestion on. 
+func (s *_completionSuggester) Contexts(contexts map[string][]types.CompletionContext) *_completionSuggester { + + s.v.Contexts = contexts + return s +} + +// The field to fetch the candidate suggestions from. +// Needs to be set globally or per suggestion. +func (s *_completionSuggester) Field(field string) *_completionSuggester { + + s.v.Field = field + + return s +} + +// Enables fuzziness, meaning you can have a typo in your search and still get +// results back. +func (s *_completionSuggester) Fuzzy(fuzzy types.SuggestFuzzinessVariant) *_completionSuggester { + + s.v.Fuzzy = fuzzy.SuggestFuzzinessCaster() + + return s +} + +// A regex query that expresses a prefix as a regular expression. +func (s *_completionSuggester) Regex(regex types.RegexOptionsVariant) *_completionSuggester { + + s.v.Regex = regex.RegexOptionsCaster() + + return s +} + +// The maximum corrections to be returned per suggest text token. +func (s *_completionSuggester) Size(size int) *_completionSuggester { + + s.v.Size = &size + + return s +} + +// Whether duplicate suggestions should be filtered out. +func (s *_completionSuggester) SkipDuplicates(skipduplicates bool) *_completionSuggester { + + s.v.SkipDuplicates = &skipduplicates + + return s +} + +func (s *_completionSuggester) FieldSuggesterCaster() *types.FieldSuggester { + container := types.NewFieldSuggester() + + container.Completion = s.v + + return container +} + +func (s *_completionSuggester) CompletionSuggesterCaster() *types.CompletionSuggester { + return s.v +} diff --git a/typedapi/esdsl/completiontool.go b/typedapi/esdsl/completiontool.go new file mode 100644 index 0000000000..8ce8afd661 --- /dev/null +++ b/typedapi/esdsl/completiontool.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _completionTool struct { + v *types.CompletionTool +} + +func NewCompletionTool(function types.CompletionToolFunctionVariant, type_ string) *_completionTool { + + tmp := &_completionTool{v: types.NewCompletionTool()} + + tmp.Function(function) + + tmp.Type(type_) + + return tmp + +} + +// The function definition. +func (s *_completionTool) Function(function types.CompletionToolFunctionVariant) *_completionTool { + + s.v.Function = *function.CompletionToolFunctionCaster() + + return s +} + +// The type of tool. +func (s *_completionTool) Type(type_ string) *_completionTool { + + s.v.Type = type_ + + return s +} + +func (s *_completionTool) CompletionToolCaster() *types.CompletionTool { + return s.v +} diff --git a/typedapi/esdsl/completiontoolchoice.go b/typedapi/esdsl/completiontoolchoice.go new file mode 100644 index 0000000000..37fd75cf22 --- /dev/null +++ b/typedapi/esdsl/completiontoolchoice.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _completionToolChoice struct { + v *types.CompletionToolChoice +} + +func NewCompletionToolChoice(function types.CompletionToolChoiceFunctionVariant, type_ string) *_completionToolChoice { + + tmp := &_completionToolChoice{v: types.NewCompletionToolChoice()} + + tmp.Function(function) + + tmp.Type(type_) + + return tmp + +} + +// The tool choice function. +func (s *_completionToolChoice) Function(function types.CompletionToolChoiceFunctionVariant) *_completionToolChoice { + + s.v.Function = *function.CompletionToolChoiceFunctionCaster() + + return s +} + +// The type of the tool. +func (s *_completionToolChoice) Type(type_ string) *_completionToolChoice { + + s.v.Type = type_ + + return s +} + +func (s *_completionToolChoice) CompletionToolChoiceCaster() *types.CompletionToolChoice { + return s.v +} diff --git a/typedapi/esdsl/completiontoolchoicefunction.go b/typedapi/esdsl/completiontoolchoicefunction.go new file mode 100644 index 0000000000..cd5e1ca1e4 --- /dev/null +++ b/typedapi/esdsl/completiontoolchoicefunction.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _completionToolChoiceFunction struct { + v *types.CompletionToolChoiceFunction +} + +func NewCompletionToolChoiceFunction(name string) *_completionToolChoiceFunction { + + tmp := &_completionToolChoiceFunction{v: types.NewCompletionToolChoiceFunction()} + + tmp.Name(name) + + return tmp + +} + +// The name of the function to call. +func (s *_completionToolChoiceFunction) Name(name string) *_completionToolChoiceFunction { + + s.v.Name = name + + return s +} + +func (s *_completionToolChoiceFunction) CompletionToolChoiceFunctionCaster() *types.CompletionToolChoiceFunction { + return s.v +} diff --git a/typedapi/esdsl/completiontoolfunction.go b/typedapi/esdsl/completiontoolfunction.go new file mode 100644 index 0000000000..8cf1dbb825 --- /dev/null +++ b/typedapi/esdsl/completiontoolfunction.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _completionToolFunction struct { + v *types.CompletionToolFunction +} + +func NewCompletionToolFunction(name string) *_completionToolFunction { + + tmp := &_completionToolFunction{v: types.NewCompletionToolFunction()} + + tmp.Name(name) + + return tmp + +} + +// A description of what the function does. +// This is used by the model to choose when and how to call the function. +func (s *_completionToolFunction) Description(description string) *_completionToolFunction { + + s.v.Description = &description + + return s +} + +// The name of the function. +func (s *_completionToolFunction) Name(name string) *_completionToolFunction { + + s.v.Name = name + + return s +} + +// The parameters the functional accepts. This should be formatted as a JSON +// object. +func (s *_completionToolFunction) Parameters(parameters json.RawMessage) *_completionToolFunction { + + s.v.Parameters = parameters + + return s +} + +// Whether to enable schema adherence when generating the function call. 
+func (s *_completionToolFunction) Strict(strict bool) *_completionToolFunction { + + s.v.Strict = &strict + + return s +} + +func (s *_completionToolFunction) CompletionToolFunctionCaster() *types.CompletionToolFunction { + return s.v +} diff --git a/typedapi/esdsl/completiontooltype.go b/typedapi/esdsl/completiontooltype.go new file mode 100644 index 0000000000..6a17cbcd41 --- /dev/null +++ b/typedapi/esdsl/completiontooltype.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _completionToolType struct { + v types.CompletionToolType +} + +func NewCompletionToolType() *_completionToolType { + return &_completionToolType{v: nil} +} + +func (u *_completionToolType) String(string string) *_completionToolType { + + u.v = &string + + return u +} + +func (u *_completionToolType) CompletionToolChoice(completiontoolchoice types.CompletionToolChoiceVariant) *_completionToolType { + + u.v = &completiontoolchoice + + return u +} + +// Interface implementation for CompletionToolChoice in CompletionToolType union +func (u *_completionToolChoice) CompletionToolTypeCaster() *types.CompletionToolType { + t := types.CompletionToolType(u.v) + return &t +} + +func (u *_completionToolType) CompletionToolTypeCaster() *types.CompletionToolType { + return &u.v +} diff --git a/typedapi/esdsl/componenttemplatenode.go b/typedapi/esdsl/componenttemplatenode.go new file mode 100644 index 0000000000..59cf959a71 --- /dev/null +++ b/typedapi/esdsl/componenttemplatenode.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _componentTemplateNode struct { + v *types.ComponentTemplateNode +} + +func NewComponentTemplateNode(template types.ComponentTemplateSummaryVariant) *_componentTemplateNode { + + tmp := &_componentTemplateNode{v: types.NewComponentTemplateNode()} + + tmp.Template(template) + + return tmp + +} + +func (s *_componentTemplateNode) Deprecated(deprecated bool) *_componentTemplateNode { + + s.v.Deprecated = &deprecated + + return s +} + +func (s *_componentTemplateNode) Meta_(metadata types.MetadataVariant) *_componentTemplateNode { + + s.v.Meta_ = *metadata.MetadataCaster() + + return s +} + +func (s *_componentTemplateNode) Template(template types.ComponentTemplateSummaryVariant) *_componentTemplateNode { + + s.v.Template = *template.ComponentTemplateSummaryCaster() + + return s +} + +func (s *_componentTemplateNode) Version(versionnumber int64) *_componentTemplateNode { + + s.v.Version = &versionnumber + + return s +} + +func (s *_componentTemplateNode) ComponentTemplateNodeCaster() *types.ComponentTemplateNode { + return s.v +} diff --git a/typedapi/esdsl/componenttemplatesummary.go b/typedapi/esdsl/componenttemplatesummary.go new file mode 100644 index 0000000000..de1f846282 --- /dev/null +++ b/typedapi/esdsl/componenttemplatesummary.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _componentTemplateSummary struct { + v *types.ComponentTemplateSummary +} + +func NewComponentTemplateSummary() *_componentTemplateSummary { + + return &_componentTemplateSummary{v: types.NewComponentTemplateSummary()} + +} + +func (s *_componentTemplateSummary) Aliases(aliases map[string]types.AliasDefinition) *_componentTemplateSummary { + + s.v.Aliases = aliases + return s +} + +func (s *_componentTemplateSummary) AddAlias(key string, value types.AliasDefinitionVariant) *_componentTemplateSummary { + + var tmp map[string]types.AliasDefinition + if s.v.Aliases == nil { + s.v.Aliases = make(map[string]types.AliasDefinition) + } else { + tmp = s.v.Aliases + } + + tmp[key] = *value.AliasDefinitionCaster() + + s.v.Aliases = tmp + return s +} + +func (s *_componentTemplateSummary) Lifecycle(lifecycle types.DataStreamLifecycleWithRolloverVariant) *_componentTemplateSummary { + + s.v.Lifecycle = lifecycle.DataStreamLifecycleWithRolloverCaster() + + return s +} + +func (s *_componentTemplateSummary) Mappings(mappings types.TypeMappingVariant) *_componentTemplateSummary { + + s.v.Mappings = mappings.TypeMappingCaster() + + return s +} + +func (s *_componentTemplateSummary) Meta_(metadata types.MetadataVariant) *_componentTemplateSummary { + + s.v.Meta_ = *metadata.MetadataCaster() + + 
return s +} + +func (s *_componentTemplateSummary) Settings(settings map[string]types.IndexSettings) *_componentTemplateSummary { + + s.v.Settings = settings + return s +} + +func (s *_componentTemplateSummary) AddSetting(key string, value types.IndexSettingsVariant) *_componentTemplateSummary { + + var tmp map[string]types.IndexSettings + if s.v.Settings == nil { + s.v.Settings = make(map[string]types.IndexSettings) + } else { + tmp = s.v.Settings + } + + tmp[key] = *value.IndexSettingsCaster() + + s.v.Settings = tmp + return s +} + +func (s *_componentTemplateSummary) Version(versionnumber int64) *_componentTemplateSummary { + + s.v.Version = &versionnumber + + return s +} + +func (s *_componentTemplateSummary) ComponentTemplateSummaryCaster() *types.ComponentTemplateSummary { + return s.v +} diff --git a/typedapi/esdsl/compositeaggregatekey.go b/typedapi/esdsl/compositeaggregatekey.go new file mode 100644 index 0000000000..4a73b26728 --- /dev/null +++ b/typedapi/esdsl/compositeaggregatekey.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias. +type _compositeAggregateKey struct { + v types.CompositeAggregateKey +} + +func NewCompositeAggregateKey(compositeaggregatekey types.FieldValueVariant) *_compositeAggregateKey { + return &_compositeAggregateKey{v: make(map[string]types.FieldValue, 0)} +} + +func (u *_compositeAggregateKey) CompositeAggregateKeyCaster() *types.CompositeAggregateKey { + return &u.v +} diff --git a/typedapi/esdsl/compositeaggregation.go b/typedapi/esdsl/compositeaggregation.go new file mode 100644 index 0000000000..99a351c43f --- /dev/null +++ b/typedapi/esdsl/compositeaggregation.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _compositeAggregation struct { + v *types.CompositeAggregation +} + +// A multi-bucket aggregation that creates composite buckets from different +// sources. +// Unlike the other multi-bucket aggregations, you can use the `composite` +// aggregation to paginate *all* buckets from a multi-level aggregation +// efficiently. +func NewCompositeAggregation() *_compositeAggregation { + + return &_compositeAggregation{v: types.NewCompositeAggregation()} + +} + +// When paginating, use the `after_key` value returned in the previous response +// to retrieve the next page. +func (s *_compositeAggregation) After(compositeaggregatekey types.CompositeAggregateKeyVariant) *_compositeAggregation { + + s.v.After = *compositeaggregatekey.CompositeAggregateKeyCaster() + + return s +} + +// The number of composite buckets that should be returned. +func (s *_compositeAggregation) Size(size int) *_compositeAggregation { + + s.v.Size = &size + + return s +} + +// The value sources used to build composite buckets. +// Keys are returned in the order of the `sources` definition. 
+func (s *_compositeAggregation) Sources(sources []map[string]types.CompositeAggregationSource) *_compositeAggregation { + + s.v.Sources = sources + + return s +} + +func (s *_compositeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Composite = s.v + + return container +} + +func (s *_compositeAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Composite = s.v + + return container +} + +func (s *_compositeAggregation) CompositeAggregationCaster() *types.CompositeAggregation { + return s.v +} diff --git a/typedapi/esdsl/compositeaggregationsource.go b/typedapi/esdsl/compositeaggregationsource.go new file mode 100644 index 0000000000..64334ffb08 --- /dev/null +++ b/typedapi/esdsl/compositeaggregationsource.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _compositeAggregationSource struct { + v *types.CompositeAggregationSource +} + +func NewCompositeAggregationSource() *_compositeAggregationSource { + + return &_compositeAggregationSource{v: types.NewCompositeAggregationSource()} + +} + +// A date histogram aggregation. +func (s *_compositeAggregationSource) DateHistogram(datehistogram types.CompositeDateHistogramAggregationVariant) *_compositeAggregationSource { + + s.v.DateHistogram = datehistogram.CompositeDateHistogramAggregationCaster() + + return s +} + +// A geotile grid aggregation. +func (s *_compositeAggregationSource) GeotileGrid(geotilegrid types.CompositeGeoTileGridAggregationVariant) *_compositeAggregationSource { + + s.v.GeotileGrid = geotilegrid.CompositeGeoTileGridAggregationCaster() + + return s +} + +// A histogram aggregation. +func (s *_compositeAggregationSource) Histogram(histogram types.CompositeHistogramAggregationVariant) *_compositeAggregationSource { + + s.v.Histogram = histogram.CompositeHistogramAggregationCaster() + + return s +} + +// A terms aggregation. +func (s *_compositeAggregationSource) Terms(terms types.CompositeTermsAggregationVariant) *_compositeAggregationSource { + + s.v.Terms = terms.CompositeTermsAggregationCaster() + + return s +} + +func (s *_compositeAggregationSource) CompositeAggregationSourceCaster() *types.CompositeAggregationSource { + return s.v +} diff --git a/typedapi/esdsl/compositedatehistogramaggregation.go b/typedapi/esdsl/compositedatehistogramaggregation.go new file mode 100644 index 0000000000..7efb9ff264 --- /dev/null +++ b/typedapi/esdsl/compositedatehistogramaggregation.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +type _compositeDateHistogramAggregation struct { + v *types.CompositeDateHistogramAggregation +} + +func NewCompositeDateHistogramAggregation() *_compositeDateHistogramAggregation { + + return &_compositeDateHistogramAggregation{v: types.NewCompositeDateHistogramAggregation()} + +} + +// Either `calendar_interval` or `fixed_interval` must be present +func (s *_compositeDateHistogramAggregation) CalendarInterval(durationlarge string) *_compositeDateHistogramAggregation { + + s.v.CalendarInterval = &durationlarge + + return s +} + +// Either `field` or `script` must be present +func (s *_compositeDateHistogramAggregation) Field(field string) *_compositeDateHistogramAggregation { + + s.v.Field = &field + + return s +} + +// Either `calendar_interval` or `fixed_interval` must be 
present +func (s *_compositeDateHistogramAggregation) FixedInterval(durationlarge string) *_compositeDateHistogramAggregation { + + s.v.FixedInterval = &durationlarge + + return s +} + +func (s *_compositeDateHistogramAggregation) Format(format string) *_compositeDateHistogramAggregation { + + s.v.Format = &format + + return s +} + +func (s *_compositeDateHistogramAggregation) MissingBucket(missingbucket bool) *_compositeDateHistogramAggregation { + + s.v.MissingBucket = &missingbucket + + return s +} + +func (s *_compositeDateHistogramAggregation) MissingOrder(missingorder missingorder.MissingOrder) *_compositeDateHistogramAggregation { + + s.v.MissingOrder = &missingorder + return s +} + +func (s *_compositeDateHistogramAggregation) Offset(duration types.DurationVariant) *_compositeDateHistogramAggregation { + + s.v.Offset = *duration.DurationCaster() + + return s +} + +func (s *_compositeDateHistogramAggregation) Order(order sortorder.SortOrder) *_compositeDateHistogramAggregation { + + s.v.Order = &order + return s +} + +// Either `field` or `script` must be present +func (s *_compositeDateHistogramAggregation) Script(script types.ScriptVariant) *_compositeDateHistogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_compositeDateHistogramAggregation) TimeZone(timezone string) *_compositeDateHistogramAggregation { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_compositeDateHistogramAggregation) ValueType(valuetype valuetype.ValueType) *_compositeDateHistogramAggregation { + + s.v.ValueType = &valuetype + return s +} + +func (s *_compositeDateHistogramAggregation) CompositeDateHistogramAggregationCaster() *types.CompositeDateHistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/compositegeotilegridaggregation.go b/typedapi/esdsl/compositegeotilegridaggregation.go new file mode 100644 index 0000000000..ddc99a851a --- /dev/null +++ b/typedapi/esdsl/compositegeotilegridaggregation.go @@ -0,0 +1,97 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +type _compositeGeoTileGridAggregation struct { + v *types.CompositeGeoTileGridAggregation +} + +func NewCompositeGeoTileGridAggregation() *_compositeGeoTileGridAggregation { + + return &_compositeGeoTileGridAggregation{v: types.NewCompositeGeoTileGridAggregation()} + +} + +func (s *_compositeGeoTileGridAggregation) Bounds(geobounds types.GeoBoundsVariant) *_compositeGeoTileGridAggregation { + + s.v.Bounds = *geobounds.GeoBoundsCaster() + + return s +} + +// Either `field` or `script` must be present +func (s *_compositeGeoTileGridAggregation) Field(field string) *_compositeGeoTileGridAggregation { + + s.v.Field = &field + + return s +} + +func (s *_compositeGeoTileGridAggregation) 
MissingBucket(missingbucket bool) *_compositeGeoTileGridAggregation { + + s.v.MissingBucket = &missingbucket + + return s +} + +func (s *_compositeGeoTileGridAggregation) MissingOrder(missingorder missingorder.MissingOrder) *_compositeGeoTileGridAggregation { + + s.v.MissingOrder = &missingorder + return s +} + +func (s *_compositeGeoTileGridAggregation) Order(order sortorder.SortOrder) *_compositeGeoTileGridAggregation { + + s.v.Order = &order + return s +} + +func (s *_compositeGeoTileGridAggregation) Precision(precision int) *_compositeGeoTileGridAggregation { + + s.v.Precision = &precision + + return s +} + +// Either `field` or `script` must be present +func (s *_compositeGeoTileGridAggregation) Script(script types.ScriptVariant) *_compositeGeoTileGridAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_compositeGeoTileGridAggregation) ValueType(valuetype valuetype.ValueType) *_compositeGeoTileGridAggregation { + + s.v.ValueType = &valuetype + return s +} + +func (s *_compositeGeoTileGridAggregation) CompositeGeoTileGridAggregationCaster() *types.CompositeGeoTileGridAggregation { + return s.v +} diff --git a/typedapi/esdsl/compositehistogramaggregation.go b/typedapi/esdsl/compositehistogramaggregation.go new file mode 100644 index 0000000000..b2a43855d6 --- /dev/null +++ b/typedapi/esdsl/compositehistogramaggregation.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +type _compositeHistogramAggregation struct { + v *types.CompositeHistogramAggregation +} + +func NewCompositeHistogramAggregation(interval types.Float64) *_compositeHistogramAggregation { + + tmp := &_compositeHistogramAggregation{v: types.NewCompositeHistogramAggregation()} + + tmp.Interval(interval) + + return tmp + +} + +// Either `field` or `script` must be present +func (s *_compositeHistogramAggregation) Field(field string) *_compositeHistogramAggregation { + + s.v.Field = &field + + return s +} + +func (s *_compositeHistogramAggregation) Interval(interval types.Float64) *_compositeHistogramAggregation { + + s.v.Interval = interval + + return s +} + +func (s *_compositeHistogramAggregation) MissingBucket(missingbucket bool) *_compositeHistogramAggregation { + + s.v.MissingBucket = &missingbucket + + return s +} + +func (s *_compositeHistogramAggregation) MissingOrder(missingorder missingorder.MissingOrder) *_compositeHistogramAggregation { + + s.v.MissingOrder = &missingorder + return s +} + +func (s *_compositeHistogramAggregation) Order(order 
sortorder.SortOrder) *_compositeHistogramAggregation { + + s.v.Order = &order + return s +} + +// Either `field` or `script` must be present +func (s *_compositeHistogramAggregation) Script(script types.ScriptVariant) *_compositeHistogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_compositeHistogramAggregation) ValueType(valuetype valuetype.ValueType) *_compositeHistogramAggregation { + + s.v.ValueType = &valuetype + return s +} + +func (s *_compositeHistogramAggregation) CompositeHistogramAggregationCaster() *types.CompositeHistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/compositesubfield.go b/typedapi/esdsl/compositesubfield.go new file mode 100644 index 0000000000..2dc73f4dd1 --- /dev/null +++ b/typedapi/esdsl/compositesubfield.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype" +) + +type _compositeSubField struct { + v *types.CompositeSubField +} + +func NewCompositeSubField(type_ runtimefieldtype.RuntimeFieldType) *_compositeSubField { + + tmp := &_compositeSubField{v: types.NewCompositeSubField()} + + tmp.Type(type_) + + return tmp + +} + +func (s *_compositeSubField) Type(type_ runtimefieldtype.RuntimeFieldType) *_compositeSubField { + + s.v.Type = type_ + return s +} + +func (s *_compositeSubField) CompositeSubFieldCaster() *types.CompositeSubField { + return s.v +} diff --git a/typedapi/esdsl/compositetermsaggregation.go b/typedapi/esdsl/compositetermsaggregation.go new file mode 100644 index 0000000000..5f6a17fb43 --- /dev/null +++ b/typedapi/esdsl/compositetermsaggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +type _compositeTermsAggregation struct { + v *types.CompositeTermsAggregation +} + +func NewCompositeTermsAggregation() *_compositeTermsAggregation { + + return &_compositeTermsAggregation{v: types.NewCompositeTermsAggregation()} + +} + +// Either `field` or `script` must be present +func (s *_compositeTermsAggregation) Field(field string) *_compositeTermsAggregation { + + s.v.Field = &field + + return s +} + +func (s *_compositeTermsAggregation) MissingBucket(missingbucket bool) *_compositeTermsAggregation { + + s.v.MissingBucket = &missingbucket + + return s +} + +func (s *_compositeTermsAggregation) MissingOrder(missingorder missingorder.MissingOrder) *_compositeTermsAggregation { + + s.v.MissingOrder = &missingorder + return s +} + +func (s *_compositeTermsAggregation) Order(order sortorder.SortOrder) *_compositeTermsAggregation { + + s.v.Order = &order + return s +} + +// Either `field` or `script` must be present +func (s *_compositeTermsAggregation) Script(script types.ScriptVariant) *_compositeTermsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_compositeTermsAggregation) ValueType(valuetype valuetype.ValueType) *_compositeTermsAggregation { + + s.v.ValueType = &valuetype + return s +} + +func (s *_compositeTermsAggregation) CompositeTermsAggregationCaster() *types.CompositeTermsAggregation { + return s.v +} diff --git a/typedapi/esdsl/conditiontokenfilter.go b/typedapi/esdsl/conditiontokenfilter.go new file mode 100644 index 0000000000..e853805a92 --- /dev/null +++ b/typedapi/esdsl/conditiontokenfilter.go @@ 
-0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _conditionTokenFilter struct { + v *types.ConditionTokenFilter +} + +func NewConditionTokenFilter(script types.ScriptVariant) *_conditionTokenFilter { + + tmp := &_conditionTokenFilter{v: types.NewConditionTokenFilter()} + + tmp.Script(script) + + return tmp + +} + +func (s *_conditionTokenFilter) Filter(filters ...string) *_conditionTokenFilter { + + for _, v := range filters { + + s.v.Filter = append(s.v.Filter, v) + + } + return s +} + +func (s *_conditionTokenFilter) Script(script types.ScriptVariant) *_conditionTokenFilter { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_conditionTokenFilter) Version(versionstring string) *_conditionTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_conditionTokenFilter) ConditionTokenFilterCaster() *types.ConditionTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/configuration.go 
b/typedapi/esdsl/configuration.go new file mode 100644 index 0000000000..c2f12aebac --- /dev/null +++ b/typedapi/esdsl/configuration.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _configuration struct { + v *types.Configuration +} + +func NewConfiguration() *_configuration { + + return &_configuration{v: types.NewConfiguration()} + +} + +// A list of feature states to be included in this snapshot. A list of features +// available for inclusion in the snapshot and their descriptions be can be +// retrieved using the get features API. +// Each feature state includes one or more system indices containing data +// necessary for the function of that feature. Providing an empty array will +// include no feature states in the snapshot, regardless of the value of +// include_global_state. 
By default, all available feature states will be +// included in the snapshot if include_global_state is true, or no feature +// states if include_global_state is false. +func (s *_configuration) FeatureStates(featurestates ...string) *_configuration { + + for _, v := range featurestates { + + s.v.FeatureStates = append(s.v.FeatureStates, v) + + } + return s +} + +// If false, the snapshot fails if any data stream or index in indices is +// missing or closed. If true, the snapshot ignores missing or closed data +// streams and indices. +func (s *_configuration) IgnoreUnavailable(ignoreunavailable bool) *_configuration { + + s.v.IgnoreUnavailable = &ignoreunavailable + + return s +} + +// If true, the current global state is included in the snapshot. +func (s *_configuration) IncludeGlobalState(includeglobalstate bool) *_configuration { + + s.v.IncludeGlobalState = &includeglobalstate + + return s +} + +// A comma-separated list of data streams and indices to include in the +// snapshot. Multi-index syntax is supported. +// By default, a snapshot includes all data streams and indices in the cluster. +// If this argument is provided, the snapshot only includes the specified data +// streams and clusters. +func (s *_configuration) Indices(indices ...string) *_configuration { + + s.v.Indices = indices + + return s +} + +// Attaches arbitrary metadata to the snapshot, such as a record of who took the +// snapshot, why it was taken, or any other useful data. Metadata must be less +// than 1024 bytes. +func (s *_configuration) Metadata(metadata types.MetadataVariant) *_configuration { + + s.v.Metadata = *metadata.MetadataCaster() + + return s +} + +// If false, the entire snapshot will fail if one or more indices included in +// the snapshot do not have all primary shards available. 
+func (s *_configuration) Partial(partial bool) *_configuration { + + s.v.Partial = &partial + + return s +} + +func (s *_configuration) ConfigurationCaster() *types.Configuration { + return s.v +} diff --git a/typedapi/esdsl/connectorconfigproperties.go b/typedapi/esdsl/connectorconfigproperties.go new file mode 100644 index 0000000000..de185f3b8c --- /dev/null +++ b/typedapi/esdsl/connectorconfigproperties.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectorfieldtype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/displaytype" +) + +type _connectorConfigProperties struct { + v *types.ConnectorConfigProperties +} + +func NewConnectorConfigProperties(display displaytype.DisplayType, label string, required bool, sensitive bool, value json.RawMessage) *_connectorConfigProperties { + + tmp := &_connectorConfigProperties{v: types.NewConnectorConfigProperties()} + + tmp.Display(display) + + tmp.Label(label) + + tmp.Required(required) + + tmp.Sensitive(sensitive) + + tmp.Value(value) + + return tmp + +} + +func (s *_connectorConfigProperties) Category(category string) *_connectorConfigProperties { + + s.v.Category = &category + + return s +} + +func (s *_connectorConfigProperties) DefaultValue(scalarvalue types.ScalarValueVariant) *_connectorConfigProperties { + + s.v.DefaultValue = *scalarvalue.ScalarValueCaster() + + return s +} + +func (s *_connectorConfigProperties) DependsOn(dependsons ...types.DependencyVariant) *_connectorConfigProperties { + + for _, v := range dependsons { + + s.v.DependsOn = append(s.v.DependsOn, *v.DependencyCaster()) + + } + return s +} + +func (s *_connectorConfigProperties) Display(display displaytype.DisplayType) *_connectorConfigProperties { + + s.v.Display = display + return s +} + +func (s *_connectorConfigProperties) Label(label string) *_connectorConfigProperties { + + s.v.Label = label + + return s +} + +func (s *_connectorConfigProperties) Options(options ...types.SelectOptionVariant) *_connectorConfigProperties { + + for _, v := range options { + + s.v.Options = append(s.v.Options, *v.SelectOptionCaster()) + + } + return s +} + +func (s *_connectorConfigProperties) Order(order int) 
*_connectorConfigProperties { + + s.v.Order = &order + + return s +} + +func (s *_connectorConfigProperties) Placeholder(placeholder string) *_connectorConfigProperties { + + s.v.Placeholder = &placeholder + + return s +} + +func (s *_connectorConfigProperties) Required(required bool) *_connectorConfigProperties { + + s.v.Required = required + + return s +} + +func (s *_connectorConfigProperties) Sensitive(sensitive bool) *_connectorConfigProperties { + + s.v.Sensitive = sensitive + + return s +} + +func (s *_connectorConfigProperties) Tooltip(tooltip string) *_connectorConfigProperties { + + s.v.Tooltip = &tooltip + + return s +} + +func (s *_connectorConfigProperties) Type(type_ connectorfieldtype.ConnectorFieldType) *_connectorConfigProperties { + + s.v.Type = &type_ + return s +} + +func (s *_connectorConfigProperties) UiRestrictions(uirestrictions ...string) *_connectorConfigProperties { + + for _, v := range uirestrictions { + + s.v.UiRestrictions = append(s.v.UiRestrictions, v) + + } + return s +} + +func (s *_connectorConfigProperties) Validations(validations ...types.ValidationVariant) *_connectorConfigProperties { + + for _, v := range validations { + + s.v.Validations = append(s.v.Validations, *v.ValidationCaster()) + + } + return s +} + +func (s *_connectorConfigProperties) Value(value json.RawMessage) *_connectorConfigProperties { + + s.v.Value = value + + return s +} + +func (s *_connectorConfigProperties) ConnectorConfigPropertiesCaster() *types.ConnectorConfigProperties { + return s.v +} diff --git a/typedapi/esdsl/connectorconfiguration.go b/typedapi/esdsl/connectorconfiguration.go new file mode 100644 index 0000000000..b749bbf453 --- /dev/null +++ b/typedapi/esdsl/connectorconfiguration.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias. +type _connectorConfiguration struct { + v types.ConnectorConfiguration +} + +func NewConnectorConfiguration(connectorconfiguration types.ConnectorConfigPropertiesVariant) *_connectorConfiguration { + return &_connectorConfiguration{v: make(map[string]types.ConnectorConfigProperties, 0)} +} + +func (u *_connectorConfiguration) ConnectorConfigurationCaster() *types.ConnectorConfiguration { + return &u.v +} diff --git a/typedapi/esdsl/connectorfeatures.go b/typedapi/esdsl/connectorfeatures.go new file mode 100644 index 0000000000..fe276e7b04 --- /dev/null +++ b/typedapi/esdsl/connectorfeatures.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _connectorFeatures struct { + v *types.ConnectorFeatures +} + +func NewConnectorFeatures() *_connectorFeatures { + + return &_connectorFeatures{v: types.NewConnectorFeatures()} + +} + +// Indicates whether document-level security is enabled. +func (s *_connectorFeatures) DocumentLevelSecurity(documentlevelsecurity types.FeatureEnabledVariant) *_connectorFeatures { + + s.v.DocumentLevelSecurity = documentlevelsecurity.FeatureEnabledCaster() + + return s +} + +// Indicates whether incremental syncs are enabled. +func (s *_connectorFeatures) IncrementalSync(incrementalsync types.FeatureEnabledVariant) *_connectorFeatures { + + s.v.IncrementalSync = incrementalsync.FeatureEnabledCaster() + + return s +} + +// Indicates whether managed connector API keys are enabled. 
+func (s *_connectorFeatures) NativeConnectorApiKeys(nativeconnectorapikeys types.FeatureEnabledVariant) *_connectorFeatures { + + s.v.NativeConnectorApiKeys = nativeconnectorapikeys.FeatureEnabledCaster() + + return s +} + +func (s *_connectorFeatures) SyncRules(syncrules types.SyncRulesFeatureVariant) *_connectorFeatures { + + s.v.SyncRules = syncrules.SyncRulesFeatureCaster() + + return s +} + +func (s *_connectorFeatures) ConnectorFeaturesCaster() *types.ConnectorFeatures { + return s.v +} diff --git a/typedapi/esdsl/connectorscheduling.go b/typedapi/esdsl/connectorscheduling.go new file mode 100644 index 0000000000..6db3007769 --- /dev/null +++ b/typedapi/esdsl/connectorscheduling.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _connectorScheduling struct { + v *types.ConnectorScheduling +} + +func NewConnectorScheduling(enabled bool, interval string) *_connectorScheduling { + + tmp := &_connectorScheduling{v: types.NewConnectorScheduling()} + + tmp.Enabled(enabled) + + tmp.Interval(interval) + + return tmp + +} + +func (s *_connectorScheduling) Enabled(enabled bool) *_connectorScheduling { + + s.v.Enabled = enabled + + return s +} + +// The interval is expressed using the crontab syntax +func (s *_connectorScheduling) Interval(interval string) *_connectorScheduling { + + s.v.Interval = interval + + return s +} + +func (s *_connectorScheduling) ConnectorSchedulingCaster() *types.ConnectorScheduling { + return s.v +} diff --git a/typedapi/esdsl/constantkeywordproperty.go b/typedapi/esdsl/constantkeywordproperty.go new file mode 100644 index 0000000000..f9b64994a0 --- /dev/null +++ b/typedapi/esdsl/constantkeywordproperty.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _constantKeywordProperty struct { + v *types.ConstantKeywordProperty +} + +func NewConstantKeywordProperty() *_constantKeywordProperty { + + return &_constantKeywordProperty{v: types.NewConstantKeywordProperty()} + +} + +func (s *_constantKeywordProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_constantKeywordProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_constantKeywordProperty) Fields(fields map[string]types.Property) *_constantKeywordProperty { + + s.v.Fields = fields + return s +} + +func (s *_constantKeywordProperty) AddField(key string, value types.PropertyVariant) *_constantKeywordProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_constantKeywordProperty) IgnoreAbove(ignoreabove int) *_constantKeywordProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_constantKeywordProperty) Meta(meta map[string]string) *_constantKeywordProperty { + + s.v.Meta = meta + return s +} + +func (s *_constantKeywordProperty) AddMeta(key string, value string) *_constantKeywordProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_constantKeywordProperty) Properties(properties map[string]types.Property) *_constantKeywordProperty { + + s.v.Properties = properties + return s +} + +func (s *_constantKeywordProperty) AddProperty(key string, value types.PropertyVariant) *_constantKeywordProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_constantKeywordProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_constantKeywordProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_constantKeywordProperty) Value(value json.RawMessage) *_constantKeywordProperty { + + s.v.Value = value + + return s +} + +func (s *_constantKeywordProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_constantKeywordProperty) ConstantKeywordPropertyCaster() *types.ConstantKeywordProperty { + return s.v +} diff --git a/typedapi/esdsl/constantscorequery.go b/typedapi/esdsl/constantscorequery.go new file mode 100644 index 0000000000..f47ed32a66 --- /dev/null +++ b/typedapi/esdsl/constantscorequery.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _constantScoreQuery struct { + v *types.ConstantScoreQuery +} + +// Wraps a filter query and returns every matching document with a relevance +// score equal to the `boost` parameter value. +func NewConstantScoreQuery(filter types.QueryVariant) *_constantScoreQuery { + + tmp := &_constantScoreQuery{v: types.NewConstantScoreQuery()} + + tmp.Filter(filter) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_constantScoreQuery) Boost(boost float32) *_constantScoreQuery { + + s.v.Boost = &boost + + return s +} + +// Filter query you wish to run. Any returned documents must match this query. +// Filter queries do not calculate relevance scores. +// To speed up performance, Elasticsearch automatically caches frequently used +// filter queries. 
+func (s *_constantScoreQuery) Filter(filter types.QueryVariant) *_constantScoreQuery { + + s.v.Filter = *filter.QueryCaster() + + return s +} + +func (s *_constantScoreQuery) QueryName_(queryname_ string) *_constantScoreQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_constantScoreQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.ConstantScore = s.v + + return container +} + +func (s *_constantScoreQuery) ConstantScoreQueryCaster() *types.ConstantScoreQuery { + return s.v +} diff --git a/typedapi/esdsl/contentobject.go b/typedapi/esdsl/contentobject.go new file mode 100644 index 0000000000..d057a44fdc --- /dev/null +++ b/typedapi/esdsl/contentobject.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _contentObject struct { + v *types.ContentObject +} + +func NewContentObject(text string, type_ string) *_contentObject { + + tmp := &_contentObject{v: types.NewContentObject()} + + tmp.Text(text) + + tmp.Type(type_) + + return tmp + +} + +// The text content. +func (s *_contentObject) Text(text string) *_contentObject { + + s.v.Text = text + + return s +} + +// The type of content. +func (s *_contentObject) Type(type_ string) *_contentObject { + + s.v.Type = type_ + + return s +} + +func (s *_contentObject) ContentObjectCaster() *types.ContentObject { + return s.v +} diff --git a/typedapi/esdsl/context.go b/typedapi/esdsl/context.go new file mode 100644 index 0000000000..47acaed7da --- /dev/null +++ b/typedapi/esdsl/context.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _context struct { + v types.Context +} + +func NewContext() *_context { + return &_context{v: nil} +} + +func (u *_context) String(string string) *_context { + + u.v = &string + + return u +} + +func (u *_context) GeoLocation(geolocation types.GeoLocationVariant) *_context { + + u.v = *geolocation.GeoLocationCaster() + + return u +} + +// Interface implementation for GeoLocation in Context union +func (u *_geoLocation) ContextCaster() *types.Context { + t := types.Context(u.v) + return &t +} + +func (u *_context) ContextCaster() *types.Context { + return &u.v +} diff --git a/typedapi/esdsl/convertprocessor.go b/typedapi/esdsl/convertprocessor.go new file mode 100644 index 0000000000..4c93ceef3b --- /dev/null +++ b/typedapi/esdsl/convertprocessor.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype" +) + +type _convertProcessor struct { + v *types.ConvertProcessor +} + +// Converts a field in the currently ingested document to a different type, such +// as converting a string to an integer. +// If the field value is an array, all members will be converted. +func NewConvertProcessor(type_ converttype.ConvertType) *_convertProcessor { + + tmp := &_convertProcessor{v: types.NewConvertProcessor()} + + tmp.Type(type_) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_convertProcessor) Description(description string) *_convertProcessor { + + s.v.Description = &description + + return s +} + +// The field whose value is to be converted. +func (s *_convertProcessor) Field(field string) *_convertProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_convertProcessor) If(if_ types.ScriptVariant) *_convertProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_convertProcessor) IgnoreFailure(ignorefailure bool) *_convertProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_convertProcessor) IgnoreMissing(ignoremissing bool) *_convertProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. 
+func (s *_convertProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_convertProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_convertProcessor) Tag(tag string) *_convertProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to. +// By default, the `field` is updated in-place. +func (s *_convertProcessor) TargetField(field string) *_convertProcessor { + + s.v.TargetField = &field + + return s +} + +// The type to convert the existing value to. +func (s *_convertProcessor) Type(type_ converttype.ConvertType) *_convertProcessor { + + s.v.Type = type_ + return s +} + +func (s *_convertProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Convert = s.v + + return container +} + +func (s *_convertProcessor) ConvertProcessorCaster() *types.ConvertProcessor { + return s.v +} diff --git a/typedapi/esdsl/coordsgeobounds.go b/typedapi/esdsl/coordsgeobounds.go new file mode 100644 index 0000000000..114f1b3d95 --- /dev/null +++ b/typedapi/esdsl/coordsgeobounds.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _coordsGeoBounds struct { + v *types.CoordsGeoBounds +} + +func NewCoordsGeoBounds(bottom types.Float64, left types.Float64, right types.Float64, top types.Float64) *_coordsGeoBounds { + + tmp := &_coordsGeoBounds{v: types.NewCoordsGeoBounds()} + + tmp.Bottom(bottom) + + tmp.Left(left) + + tmp.Right(right) + + tmp.Top(top) + + return tmp + +} + +func (s *_coordsGeoBounds) Bottom(bottom types.Float64) *_coordsGeoBounds { + + s.v.Bottom = bottom + + return s +} + +func (s *_coordsGeoBounds) Left(left types.Float64) *_coordsGeoBounds { + + s.v.Left = left + + return s +} + +func (s *_coordsGeoBounds) Right(right types.Float64) *_coordsGeoBounds { + + s.v.Right = right + + return s +} + +func (s *_coordsGeoBounds) Top(top types.Float64) *_coordsGeoBounds { + + s.v.Top = top + + return s +} + +func (s *_coordsGeoBounds) CoordsGeoBoundsCaster() *types.CoordsGeoBounds { + return s.v +} diff --git a/typedapi/esdsl/coreknnquery.go b/typedapi/esdsl/coreknnquery.go new file mode 100644 index 0000000000..1a068df28f --- /dev/null +++ b/typedapi/esdsl/coreknnquery.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _coreKnnQuery struct { + v *types.CoreKnnQuery +} + +func NewCoreKnnQuery(k int, numcandidates int) *_coreKnnQuery { + + tmp := &_coreKnnQuery{v: types.NewCoreKnnQuery()} + + tmp.K(k) + + tmp.NumCandidates(numcandidates) + + return tmp + +} + +// The name of the vector field to search against +func (s *_coreKnnQuery) Field(field string) *_coreKnnQuery { + + s.v.Field = field + + return s +} + +// The final number of nearest neighbors to return as top hits +func (s *_coreKnnQuery) K(k int) *_coreKnnQuery { + + s.v.K = k + + return s +} + +// The number of nearest neighbor candidates to consider per shard +func (s *_coreKnnQuery) NumCandidates(numcandidates int) *_coreKnnQuery { + + s.v.NumCandidates = numcandidates + + return s +} + +// The query vector +func (s *_coreKnnQuery) QueryVector(queryvectors ...float32) *_coreKnnQuery { + + s.v.QueryVector = queryvectors + + return s +} + +func (s *_coreKnnQuery) CoreKnnQueryCaster() *types.CoreKnnQuery { + return s.v +} diff --git a/typedapi/esdsl/countedkeywordproperty.go b/typedapi/esdsl/countedkeywordproperty.go new file mode 100644 index 0000000000..1577258c3d --- /dev/null +++ b/typedapi/esdsl/countedkeywordproperty.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _countedKeywordProperty struct { + v *types.CountedKeywordProperty +} + +func NewCountedKeywordProperty() *_countedKeywordProperty { + + return &_countedKeywordProperty{v: types.NewCountedKeywordProperty()} + +} + +func (s *_countedKeywordProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_countedKeywordProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_countedKeywordProperty) Fields(fields map[string]types.Property) *_countedKeywordProperty { + + s.v.Fields = fields + return s +} + +func (s *_countedKeywordProperty) AddField(key string, value types.PropertyVariant) *_countedKeywordProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + 
+func (s *_countedKeywordProperty) IgnoreAbove(ignoreabove int) *_countedKeywordProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_countedKeywordProperty) Index(index bool) *_countedKeywordProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_countedKeywordProperty) Meta(meta map[string]string) *_countedKeywordProperty { + + s.v.Meta = meta + return s +} + +func (s *_countedKeywordProperty) AddMeta(key string, value string) *_countedKeywordProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_countedKeywordProperty) Properties(properties map[string]types.Property) *_countedKeywordProperty { + + s.v.Properties = properties + return s +} + +func (s *_countedKeywordProperty) AddProperty(key string, value types.PropertyVariant) *_countedKeywordProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_countedKeywordProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_countedKeywordProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_countedKeywordProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_countedKeywordProperty) CountedKeywordPropertyCaster() *types.CountedKeywordProperty { + return s.v +} diff --git a/typedapi/esdsl/createfrom.go b/typedapi/esdsl/createfrom.go new file mode 100644 index 0000000000..5a28a5e5c9 --- /dev/null +++ b/typedapi/esdsl/createfrom.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _createFrom struct { + v *types.CreateFrom +} + +func NewCreateFrom() *_createFrom { + + return &_createFrom{v: types.NewCreateFrom()} + +} + +// Mappings overrides to be applied to the destination index (optional) +func (s *_createFrom) MappingsOverride(mappingsoverride types.TypeMappingVariant) *_createFrom { + + s.v.MappingsOverride = mappingsoverride.TypeMappingCaster() + + return s +} + +// If index blocks should be removed when creating destination index (optional) +func (s *_createFrom) RemoveIndexBlocks(removeindexblocks bool) *_createFrom { + + s.v.RemoveIndexBlocks = &removeindexblocks + + return s +} + +// Settings overrides to be applied to the destination index (optional) +func (s *_createFrom) SettingsOverride(settingsoverride types.IndexSettingsVariant) *_createFrom { + + s.v.SettingsOverride = settingsoverride.IndexSettingsCaster() + + return s +} + +func (s *_createFrom) CreateFromCaster() *types.CreateFrom { + return s.v +} diff --git a/typedapi/esdsl/createoperation.go 
b/typedapi/esdsl/createoperation.go new file mode 100644 index 0000000000..2671042164 --- /dev/null +++ b/typedapi/esdsl/createoperation.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _createOperation struct { + v *types.CreateOperation +} + +// Index the specified document if it does not already exist. +// The following line must contain the source data to be indexed. +func NewCreateOperation() *_createOperation { + + return &_createOperation{v: types.NewCreateOperation()} + +} + +// A map from the full name of fields to the name of dynamic templates. +// It defaults to an empty map. +// If a name matches a dynamic template, that template will be applied +// regardless of other match predicates defined in the template. +// If a field is already defined in the mapping, then this parameter won't be +// used. 
+func (s *_createOperation) DynamicTemplates(dynamictemplates map[string]string) *_createOperation { + + s.v.DynamicTemplates = dynamictemplates + return s +} + +func (s *_createOperation) AddDynamicTemplate(key string, value string) *_createOperation { + + var tmp map[string]string + if s.v.DynamicTemplates == nil { + s.v.DynamicTemplates = make(map[string]string) + } else { + tmp = s.v.DynamicTemplates + } + + tmp[key] = value + + s.v.DynamicTemplates = tmp + return s +} + +// The document ID. +func (s *_createOperation) Id_(id string) *_createOperation { + + s.v.Id_ = &id + + return s +} + +func (s *_createOperation) IfPrimaryTerm(ifprimaryterm int64) *_createOperation { + + s.v.IfPrimaryTerm = &ifprimaryterm + + return s +} + +func (s *_createOperation) IfSeqNo(sequencenumber int64) *_createOperation { + + s.v.IfSeqNo = &sequencenumber + + return s +} + +// The name of the index or index alias to perform the action on. +func (s *_createOperation) Index_(indexname string) *_createOperation { + + s.v.Index_ = &indexname + + return s +} + +// The ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, setting the value to +// `_none` turns off the default ingest pipeline for this request. +// If a final pipeline is configured, it will always run regardless of the value +// of this parameter. +func (s *_createOperation) Pipeline(pipeline string) *_createOperation { + + s.v.Pipeline = &pipeline + + return s +} + +// If `true`, the request's actions must target an index alias. +func (s *_createOperation) RequireAlias(requirealias bool) *_createOperation { + + s.v.RequireAlias = &requirealias + + return s +} + +// A custom value used to route operations to a specific shard. 
+func (s *_createOperation) Routing(routing string) *_createOperation { + + s.v.Routing = &routing + + return s +} + +func (s *_createOperation) Version(versionnumber int64) *_createOperation { + + s.v.Version = &versionnumber + + return s +} + +func (s *_createOperation) VersionType(versiontype versiontype.VersionType) *_createOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_createOperation) OperationContainerCaster() *types.OperationContainer { + container := types.NewOperationContainer() + + container.Create = s.v + + return container +} + +func (s *_createOperation) CreateOperationCaster() *types.CreateOperation { + return s.v +} diff --git a/typedapi/esdsl/csvprocessor.go b/typedapi/esdsl/csvprocessor.go new file mode 100644 index 0000000000..a74eafb16f --- /dev/null +++ b/typedapi/esdsl/csvprocessor.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _csvProcessor struct { + v *types.CsvProcessor +} + +// Extracts fields from CSV line out of a single text field within a document. +// Any empty field in CSV will be skipped. +func NewCsvProcessor() *_csvProcessor { + + return &_csvProcessor{v: types.NewCsvProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_csvProcessor) Description(description string) *_csvProcessor { + + s.v.Description = &description + + return s +} + +// Value used to fill empty fields. +// Empty fields are skipped if this is not provided. +// An empty field is one with no value (2 consecutive separators) or empty +// quotes (`""`). +func (s *_csvProcessor) EmptyValue(emptyvalue json.RawMessage) *_csvProcessor { + + s.v.EmptyValue = emptyvalue + + return s +} + +// The field to extract data from. +func (s *_csvProcessor) Field(field string) *_csvProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_csvProcessor) If(if_ types.ScriptVariant) *_csvProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_csvProcessor) IgnoreFailure(ignorefailure bool) *_csvProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_csvProcessor) IgnoreMissing(ignoremissing bool) *_csvProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. 
+func (s *_csvProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_csvProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Quote used in CSV, has to be single character string. +func (s *_csvProcessor) Quote(quote string) *_csvProcessor { + + s.v.Quote = &quote + + return s +} + +// Separator used in CSV, has to be single character string. +func (s *_csvProcessor) Separator(separator string) *_csvProcessor { + + s.v.Separator = &separator + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_csvProcessor) Tag(tag string) *_csvProcessor { + + s.v.Tag = &tag + + return s +} + +// The array of fields to assign extracted values to. +func (s *_csvProcessor) TargetFields(fields ...string) *_csvProcessor { + + s.v.TargetFields = fields + + return s +} + +// Trim whitespaces in unquoted fields. +func (s *_csvProcessor) Trim(trim bool) *_csvProcessor { + + s.v.Trim = &trim + + return s +} + +func (s *_csvProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Csv = s.v + + return container +} + +func (s *_csvProcessor) CsvProcessorCaster() *types.CsvProcessor { + return s.v +} diff --git a/typedapi/esdsl/cumulativecardinalityaggregation.go b/typedapi/esdsl/cumulativecardinalityaggregation.go new file mode 100644 index 0000000000..c5cb031e67 --- /dev/null +++ b/typedapi/esdsl/cumulativecardinalityaggregation.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _cumulativeCardinalityAggregation struct { + v *types.CumulativeCardinalityAggregation +} + +// A parent pipeline aggregation which calculates the cumulative cardinality in +// a parent `histogram` or `date_histogram` aggregation. +func NewCumulativeCardinalityAggregation() *_cumulativeCardinalityAggregation { + + return &_cumulativeCardinalityAggregation{v: types.NewCumulativeCardinalityAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_cumulativeCardinalityAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_cumulativeCardinalityAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_cumulativeCardinalityAggregation) Format(format string) *_cumulativeCardinalityAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_cumulativeCardinalityAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_cumulativeCardinalityAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_cumulativeCardinalityAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.CumulativeCardinality = s.v + + return container +} + +func (s *_cumulativeCardinalityAggregation) CumulativeCardinalityAggregationCaster() *types.CumulativeCardinalityAggregation { + return s.v +} diff --git a/typedapi/esdsl/cumulativesumaggregation.go b/typedapi/esdsl/cumulativesumaggregation.go new file mode 100644 index 0000000000..6edbb6a19d --- /dev/null +++ b/typedapi/esdsl/cumulativesumaggregation.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _cumulativeSumAggregation struct { + v *types.CumulativeSumAggregation +} + +// A parent pipeline aggregation which calculates the cumulative sum of a +// specified metric in a parent `histogram` or `date_histogram` aggregation. +func NewCumulativeSumAggregation() *_cumulativeSumAggregation { + + return &_cumulativeSumAggregation{v: types.NewCumulativeSumAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_cumulativeSumAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_cumulativeSumAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_cumulativeSumAggregation) Format(format string) *_cumulativeSumAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_cumulativeSumAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_cumulativeSumAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_cumulativeSumAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.CumulativeSum = s.v + + return container +} + +func (s *_cumulativeSumAggregation) CumulativeSumAggregationCaster() *types.CumulativeSumAggregation { + return s.v +} diff --git a/typedapi/esdsl/customanalyzer.go b/typedapi/esdsl/customanalyzer.go new file mode 100644 index 0000000000..9919a92131 --- /dev/null +++ b/typedapi/esdsl/customanalyzer.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _customAnalyzer struct { + v *types.CustomAnalyzer +} + +func NewCustomAnalyzer(tokenizer string) *_customAnalyzer { + + tmp := &_customAnalyzer{v: types.NewCustomAnalyzer()} + + tmp.Tokenizer(tokenizer) + + return tmp + +} + +func (s *_customAnalyzer) CharFilter(charfilters ...string) *_customAnalyzer { + + s.v.CharFilter = make([]string, len(charfilters)) + s.v.CharFilter = charfilters + + return s +} + +func (s *_customAnalyzer) Filter(filters ...string) *_customAnalyzer { + + s.v.Filter = make([]string, len(filters)) + s.v.Filter = filters + + return s +} + +func (s *_customAnalyzer) PositionIncrementGap(positionincrementgap int) *_customAnalyzer { + + s.v.PositionIncrementGap = &positionincrementgap + + return s +} + +func (s *_customAnalyzer) PositionOffsetGap(positionoffsetgap int) *_customAnalyzer { + + s.v.PositionOffsetGap = &positionoffsetgap + + return s +} + +func (s *_customAnalyzer) Tokenizer(tokenizer string) *_customAnalyzer { + + s.v.Tokenizer = tokenizer + + return s +} + +func (s 
*_customAnalyzer) CustomAnalyzerCaster() *types.CustomAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/customcategorizetextanalyzer.go b/typedapi/esdsl/customcategorizetextanalyzer.go new file mode 100644 index 0000000000..22ca53541d --- /dev/null +++ b/typedapi/esdsl/customcategorizetextanalyzer.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _customCategorizeTextAnalyzer struct { + v *types.CustomCategorizeTextAnalyzer +} + +func NewCustomCategorizeTextAnalyzer() *_customCategorizeTextAnalyzer { + + return &_customCategorizeTextAnalyzer{v: types.NewCustomCategorizeTextAnalyzer()} + +} + +func (s *_customCategorizeTextAnalyzer) CharFilter(charfilters ...string) *_customCategorizeTextAnalyzer { + + for _, v := range charfilters { + + s.v.CharFilter = append(s.v.CharFilter, v) + + } + return s +} + +func (s *_customCategorizeTextAnalyzer) Filter(filters ...string) *_customCategorizeTextAnalyzer { + + for _, v := range filters { + + s.v.Filter = append(s.v.Filter, v) + + } + return s +} + +func (s *_customCategorizeTextAnalyzer) Tokenizer(tokenizer string) *_customCategorizeTextAnalyzer { + + s.v.Tokenizer = &tokenizer + + return s +} + +func (s *_customCategorizeTextAnalyzer) CustomCategorizeTextAnalyzerCaster() *types.CustomCategorizeTextAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/customnormalizer.go b/typedapi/esdsl/customnormalizer.go new file mode 100644 index 0000000000..3483798701 --- /dev/null +++ b/typedapi/esdsl/customnormalizer.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _customNormalizer struct { + v *types.CustomNormalizer +} + +func NewCustomNormalizer() *_customNormalizer { + + return &_customNormalizer{v: types.NewCustomNormalizer()} + +} + +func (s *_customNormalizer) CharFilter(charfilters ...string) *_customNormalizer { + + for _, v := range charfilters { + + s.v.CharFilter = append(s.v.CharFilter, v) + + } + return s +} + +func (s *_customNormalizer) Filter(filters ...string) *_customNormalizer { + + for _, v := range filters { + + s.v.Filter = append(s.v.Filter, v) + + } + return s +} + +func (s *_customNormalizer) CustomNormalizerCaster() *types.CustomNormalizer { + return s.v +} diff --git a/typedapi/esdsl/czechanalyzer.go b/typedapi/esdsl/czechanalyzer.go new file mode 100644 index 0000000000..26ec919cca --- /dev/null +++ b/typedapi/esdsl/czechanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _czechAnalyzer struct { + v *types.CzechAnalyzer +} + +func NewCzechAnalyzer() *_czechAnalyzer { + + return &_czechAnalyzer{v: types.NewCzechAnalyzer()} + +} + +func (s *_czechAnalyzer) StemExclusion(stemexclusions ...string) *_czechAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_czechAnalyzer) Stopwords(stopwords ...string) *_czechAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_czechAnalyzer) StopwordsPath(stopwordspath string) *_czechAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_czechAnalyzer) CzechAnalyzerCaster() *types.CzechAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/dailyschedule.go b/typedapi/esdsl/dailyschedule.go new file mode 100644 index 0000000000..2621d47308 --- /dev/null +++ b/typedapi/esdsl/dailyschedule.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dailySchedule struct { + v *types.DailySchedule +} + +func NewDailySchedule() *_dailySchedule { + + return &_dailySchedule{v: types.NewDailySchedule()} + +} + +func (s *_dailySchedule) At(ats ...types.ScheduleTimeOfDayVariant) *_dailySchedule { + + for _, v := range ats { + + s.v.At = append(s.v.At, *v.ScheduleTimeOfDayCaster()) + + } + return s +} + +func (s *_dailySchedule) ScheduleContainerCaster() *types.ScheduleContainer { + container := types.NewScheduleContainer() + + container.Daily = s.v + + return container +} + +func (s *_dailySchedule) DailyScheduleCaster() *types.DailySchedule { + return s.v +} diff --git a/typedapi/esdsl/danishanalyzer.go b/typedapi/esdsl/danishanalyzer.go new file mode 100644 index 0000000000..4e1f4169cb --- /dev/null +++ b/typedapi/esdsl/danishanalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _danishAnalyzer struct { + v *types.DanishAnalyzer +} + +func NewDanishAnalyzer() *_danishAnalyzer { + + return &_danishAnalyzer{v: types.NewDanishAnalyzer()} + +} + +func (s *_danishAnalyzer) Stopwords(stopwords ...string) *_danishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_danishAnalyzer) StopwordsPath(stopwordspath string) *_danishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_danishAnalyzer) DanishAnalyzerCaster() *types.DanishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/databaseconfiguration.go b/typedapi/esdsl/databaseconfiguration.go new file mode 100644 index 0000000000..1557f68361 --- /dev/null +++ b/typedapi/esdsl/databaseconfiguration.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _databaseConfiguration struct { + v *types.DatabaseConfiguration +} + +func NewDatabaseConfiguration() *_databaseConfiguration { + return &_databaseConfiguration{v: types.NewDatabaseConfiguration()} +} + +// AdditionalDatabaseConfigurationProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_databaseConfiguration) AdditionalDatabaseConfigurationProperty(key string, value json.RawMessage) *_databaseConfiguration { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalDatabaseConfigurationProperty = tmp + return s +} + +func (s *_databaseConfiguration) Ipinfo(ipinfo types.IpinfoVariant) *_databaseConfiguration { + + s.v.Ipinfo = ipinfo.IpinfoCaster() + + return s +} + +func (s *_databaseConfiguration) Maxmind(maxmind types.MaxmindVariant) *_databaseConfiguration { + + s.v.Maxmind = maxmind.MaxmindCaster() + + return s +} + +// The provider-assigned name of the IP geolocation database to download. 
+func (s *_databaseConfiguration) Name(name string) *_databaseConfiguration { + + s.v.Name = name + + return s +} + +func (s *_databaseConfiguration) DatabaseConfigurationCaster() *types.DatabaseConfiguration { + return s.v +} diff --git a/typedapi/esdsl/datadescription.go b/typedapi/esdsl/datadescription.go new file mode 100644 index 0000000000..f8f9643e25 --- /dev/null +++ b/typedapi/esdsl/datadescription.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataDescription struct { + v *types.DataDescription +} + +func NewDataDescription() *_dataDescription { + + return &_dataDescription{v: types.NewDataDescription()} + +} + +func (s *_dataDescription) FieldDelimiter(fielddelimiter string) *_dataDescription { + + s.v.FieldDelimiter = &fielddelimiter + + return s +} + +// Only JSON format is supported at this time. 
+func (s *_dataDescription) Format(format string) *_dataDescription { + + s.v.Format = &format + + return s +} + +// The name of the field that contains the timestamp. +func (s *_dataDescription) TimeField(field string) *_dataDescription { + + s.v.TimeField = &field + + return s +} + +// The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The +// value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan +// 1970). The value `epoch_ms` indicates that time is measured in milliseconds +// since the epoch. The `epoch` and `epoch_ms` time formats accept either +// integer or real values. Custom patterns must conform to the Java +// DateTimeFormatter class. When you use date-time formatting patterns, it is +// recommended that you provide the full date, time and time zone. For example: +// `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient +// to produce a complete timestamp, job creation fails. +func (s *_dataDescription) TimeFormat(timeformat string) *_dataDescription { + + s.v.TimeFormat = &timeformat + + return s +} + +func (s *_dataDescription) DataDescriptionCaster() *types.DataDescription { + return s.v +} diff --git a/typedapi/esdsl/dataemailattachment.go b/typedapi/esdsl/dataemailattachment.go new file mode 100644 index 0000000000..1fa36d5540 --- /dev/null +++ b/typedapi/esdsl/dataemailattachment.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataattachmentformat" +) + +type _dataEmailAttachment struct { + v *types.DataEmailAttachment +} + +func NewDataEmailAttachment() *_dataEmailAttachment { + + return &_dataEmailAttachment{v: types.NewDataEmailAttachment()} + +} + +func (s *_dataEmailAttachment) Format(format dataattachmentformat.DataAttachmentFormat) *_dataEmailAttachment { + + s.v.Format = &format + return s +} + +func (s *_dataEmailAttachment) EmailAttachmentContainerCaster() *types.EmailAttachmentContainer { + container := types.NewEmailAttachmentContainer() + + container.Data = s.v + + return container +} + +func (s *_dataEmailAttachment) DataEmailAttachmentCaster() *types.DataEmailAttachment { + return s.v +} diff --git a/typedapi/esdsl/datafeedconfig.go b/typedapi/esdsl/datafeedconfig.go new file mode 100644 index 0000000000..cbec7339c3 --- /dev/null +++ b/typedapi/esdsl/datafeedconfig.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _datafeedConfig struct { + v *types.DatafeedConfig +} + +func NewDatafeedConfig() *_datafeedConfig { + + return &_datafeedConfig{v: types.NewDatafeedConfig()} + +} + +// If set, the datafeed performs aggregation searches. Support for aggregations +// is limited and should be used only with low cardinality data. +func (s *_datafeedConfig) Aggregations(aggregations map[string]types.Aggregations) *_datafeedConfig { + + s.v.Aggregations = aggregations + return s +} + +func (s *_datafeedConfig) AddAggregation(key string, value types.AggregationsVariant) *_datafeedConfig { + + var tmp map[string]types.Aggregations + if s.v.Aggregations == nil { + s.v.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = s.v.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + s.v.Aggregations = tmp + return s +} + +// Datafeeds might be required to search over long time periods, for several +// months or years. This search is split into time chunks in order to ensure the +// load on Elasticsearch is managed. 
Chunking configuration controls how the +// size of these time chunks are calculated and is an advanced configuration +// option. +func (s *_datafeedConfig) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *_datafeedConfig { + + s.v.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() + + return s +} + +// A numerical character string that uniquely identifies the datafeed. This +// identifier can contain lowercase alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. It must start and end with alphanumeric characters. +// The default value is the job identifier. +func (s *_datafeedConfig) DatafeedId(id string) *_datafeedConfig { + + s.v.DatafeedId = &id + + return s +} + +// Specifies whether the datafeed checks for missing data and the size of the +// window. The datafeed can optionally search over indices that have already +// been read in an effort to determine whether any data has subsequently been +// added to the index. If missing data is found, it is a good indication that +// the `query_delay` option is set too low and the data is being indexed after +// the datafeed has passed that moment in time. This check runs only on +// real-time datafeeds. +func (s *_datafeedConfig) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *_datafeedConfig { + + s.v.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() + + return s +} + +// The interval at which scheduled queries are made while the datafeed runs in +// real time. The default value is either the bucket span for short bucket +// spans, or, for longer bucket spans, a sensible fraction of the bucket span. +// For example: `150s`. When `frequency` is shorter than the bucket span, +// interim results for the last (partial) bucket are written then eventually +// overwritten by the full bucket results. If the datafeed uses aggregations, +// this value must be divisible by the interval of the date histogram +// aggregation. 
+func (s *_datafeedConfig) Frequency(duration types.DurationVariant) *_datafeedConfig { + + s.v.Frequency = *duration.DurationCaster() + + return s +} + +// An array of index names. Wildcards are supported. If any indices are in +// remote clusters, the machine learning nodes must have the +// `remote_cluster_client` role. +func (s *_datafeedConfig) Indices(indices ...string) *_datafeedConfig { + + s.v.Indices = indices + + return s +} + +// Specifies index expansion options that are used during search. +func (s *_datafeedConfig) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *_datafeedConfig { + + s.v.IndicesOptions = indicesoptions.IndicesOptionsCaster() + + return s +} + +func (s *_datafeedConfig) JobId(id string) *_datafeedConfig { + + s.v.JobId = &id + + return s +} + +// If a real-time datafeed has never seen any data (including during any initial +// training period) then it will automatically stop itself and close its +// associated job after this many real-time searches that return no documents. +// In other words, it will stop after `frequency` times `max_empty_searches` of +// real-time operation. If not set then a datafeed with no end time that sees no +// data will remain started until it is explicitly stopped. +func (s *_datafeedConfig) MaxEmptySearches(maxemptysearches int) *_datafeedConfig { + + s.v.MaxEmptySearches = &maxemptysearches + + return s +} + +// The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an Elasticsearch search POST body. All the +// options that are supported by Elasticsearch can be used, as this object is +// passed verbatim to Elasticsearch. +func (s *_datafeedConfig) Query(query types.QueryVariant) *_datafeedConfig { + + s.v.Query = query.QueryCaster() + + return s +} + +// The number of seconds behind real time that data is queried. For example, if +// data from 10:04 a.m. 
might not be searchable in Elasticsearch until 10:06 +// a.m., set this property to 120 seconds. The default value is randomly +// selected between `60s` and `120s`. This randomness improves the query +// performance when there are multiple jobs running on the same node. +func (s *_datafeedConfig) QueryDelay(duration types.DurationVariant) *_datafeedConfig { + + s.v.QueryDelay = *duration.DurationCaster() + + return s +} + +// Specifies runtime fields for the datafeed search. +func (s *_datafeedConfig) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *_datafeedConfig { + + s.v.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return s +} + +// Specifies scripts that evaluate custom expressions and returns script fields +// to the datafeed. The detector configuration objects in a job can contain +// functions that use these script fields. +func (s *_datafeedConfig) ScriptFields(scriptfields map[string]types.ScriptField) *_datafeedConfig { + + s.v.ScriptFields = scriptfields + return s +} + +func (s *_datafeedConfig) AddScriptField(key string, value types.ScriptFieldVariant) *_datafeedConfig { + + var tmp map[string]types.ScriptField + if s.v.ScriptFields == nil { + s.v.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = s.v.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + s.v.ScriptFields = tmp + return s +} + +// The size parameter that is used in Elasticsearch searches when the datafeed +// does not use aggregations. The maximum value is the value of +// `index.max_result_window`, which is 10,000 by default. 
+func (s *_datafeedConfig) ScrollSize(scrollsize int) *_datafeedConfig { + + s.v.ScrollSize = &scrollsize + + return s +} + +func (s *_datafeedConfig) DatafeedConfigCaster() *types.DatafeedConfig { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisanalyzedfields.go b/typedapi/esdsl/dataframeanalysisanalyzedfields.go new file mode 100644 index 0000000000..6507f490e7 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisanalyzedfields.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisAnalyzedFields struct { + v *types.DataframeAnalysisAnalyzedFields +} + +func NewDataframeAnalysisAnalyzedFields() *_dataframeAnalysisAnalyzedFields { + + return &_dataframeAnalysisAnalyzedFields{v: types.NewDataframeAnalysisAnalyzedFields()} + +} + +// An array of strings that defines the fields that will be included in the +// analysis. 
+func (s *_dataframeAnalysisAnalyzedFields) Excludes(excludes ...string) *_dataframeAnalysisAnalyzedFields { + + for _, v := range excludes { + + s.v.Excludes = append(s.v.Excludes, v) + + } + return s +} + +// An array of strings that defines the fields that will be excluded from the +// analysis. You do not need to add fields with unsupported data types to +// excludes, these fields are excluded from the analysis automatically. +func (s *_dataframeAnalysisAnalyzedFields) Includes(includes ...string) *_dataframeAnalysisAnalyzedFields { + + for _, v := range includes { + + s.v.Includes = append(s.v.Includes, v) + + } + return s +} + +func (s *_dataframeAnalysisAnalyzedFields) DataframeAnalysisAnalyzedFieldsCaster() *types.DataframeAnalysisAnalyzedFields { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisclassification.go b/typedapi/esdsl/dataframeanalysisclassification.go new file mode 100644 index 0000000000..ecae686f94 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisclassification.go @@ -0,0 +1,295 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisClassification struct { + v *types.DataframeAnalysisClassification +} + +// The configuration information necessary to perform classification. +func NewDataframeAnalysisClassification(dependentvariable string) *_dataframeAnalysisClassification { + + tmp := &_dataframeAnalysisClassification{v: types.NewDataframeAnalysisClassification()} + + tmp.DependentVariable(dependentvariable) + + return tmp + +} + +// Advanced configuration option. Machine learning uses loss guided tree +// growing, which means that the decision trees grow where the regularized loss +// decreases most quickly. This parameter affects loss calculations by acting as +// a multiplier of the tree depth. Higher alpha values result in shallower trees +// and faster training times. By default, this value is calculated during +// hyperparameter optimization. It must be greater than or equal to zero. +func (s *_dataframeAnalysisClassification) Alpha(alpha types.Float64) *_dataframeAnalysisClassification { + + s.v.Alpha = &alpha + + return s +} + +func (s *_dataframeAnalysisClassification) ClassAssignmentObjective(classassignmentobjective string) *_dataframeAnalysisClassification { + + s.v.ClassAssignmentObjective = &classassignmentobjective + + return s +} + +// Defines which field of the document is to be predicted. It must match one of +// the fields in the index being used to train. If this field is missing from a +// document, then that document will not be used for training, but a prediction +// with the trained model will be generated for it. It is also known as +// continuous target variable. +// For classification analysis, the data type of the field must be numeric +// (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or +// `boolean`. 
There must be no more than 30 different values in this field. +// For regression analysis, the data type of the field must be numeric. +func (s *_dataframeAnalysisClassification) DependentVariable(dependentvariable string) *_dataframeAnalysisClassification { + + s.v.DependentVariable = dependentvariable + + return s +} + +// Advanced configuration option. Controls the fraction of data that is used to +// compute the derivatives of the loss function for tree training. A small value +// results in the use of a small fraction of the data. If this value is set to +// be less than 1, accuracy typically improves. However, too small a value may +// result in poor convergence for the ensemble and so require more trees. By +// default, this value is calculated during hyperparameter optimization. It must +// be greater than zero and less than or equal to 1. +func (s *_dataframeAnalysisClassification) DownsampleFactor(downsamplefactor types.Float64) *_dataframeAnalysisClassification { + + s.v.DownsampleFactor = &downsamplefactor + + return s +} + +// Advanced configuration option. Specifies whether the training process should +// finish if it is not finding any better performing models. If disabled, the +// training process can take significantly longer and the chance of finding a +// better performing model is unremarkable. +func (s *_dataframeAnalysisClassification) EarlyStoppingEnabled(earlystoppingenabled bool) *_dataframeAnalysisClassification { + + s.v.EarlyStoppingEnabled = &earlystoppingenabled + + return s +} + +// Advanced configuration option. The shrinkage applied to the weights. Smaller +// values result in larger forests which have a better generalization error. +// However, larger forests cause slower training. By default, this value is +// calculated during hyperparameter optimization. It must be a value between +// 0.001 and 1. 
+func (s *_dataframeAnalysisClassification) Eta(eta types.Float64) *_dataframeAnalysisClassification { + + s.v.Eta = &eta + + return s +} + +// Advanced configuration option. Specifies the rate at which `eta` increases +// for each new tree that is added to the forest. For example, a rate of 1.05 +// increases `eta` by 5% for each extra tree. By default, this value is +// calculated during hyperparameter optimization. It must be between 0.5 and 2. +func (s *_dataframeAnalysisClassification) EtaGrowthRatePerTree(etagrowthratepertree types.Float64) *_dataframeAnalysisClassification { + + s.v.EtaGrowthRatePerTree = &etagrowthratepertree + + return s +} + +// Advanced configuration option. Defines the fraction of features that will be +// used when selecting a random bag for each candidate split. By default, this +// value is calculated during hyperparameter optimization. +func (s *_dataframeAnalysisClassification) FeatureBagFraction(featurebagfraction types.Float64) *_dataframeAnalysisClassification { + + s.v.FeatureBagFraction = &featurebagfraction + + return s +} + +// Advanced configuration option. A collection of feature preprocessors that +// modify one or more included fields. The analysis uses the resulting one or +// more features instead of the original document field. However, these features +// are ephemeral; they are not stored in the destination index. Multiple +// `feature_processors` entries can refer to the same document fields. Automatic +// categorical feature encoding still occurs for the fields that are unprocessed +// by a custom processor or that have categorical values. Use this property only +// if you want to override the automatic feature encoding of the specified +// fields. 
+func (s *_dataframeAnalysisClassification) FeatureProcessors(featureprocessors ...types.DataframeAnalysisFeatureProcessorVariant) *_dataframeAnalysisClassification { + + for _, v := range featureprocessors { + + s.v.FeatureProcessors = append(s.v.FeatureProcessors, *v.DataframeAnalysisFeatureProcessorCaster()) + + } + return s +} + +// Advanced configuration option. Regularization parameter to prevent +// overfitting on the training data set. Multiplies a linear penalty associated +// with the size of individual trees in the forest. A high gamma value causes +// training to prefer small trees. A small gamma value results in larger +// individual trees and slower training. By default, this value is calculated +// during hyperparameter optimization. It must be a nonnegative value. +func (s *_dataframeAnalysisClassification) Gamma(gamma types.Float64) *_dataframeAnalysisClassification { + + s.v.Gamma = &gamma + + return s +} + +// Advanced configuration option. Regularization parameter to prevent +// overfitting on the training data set. Multiplies an L2 regularization term +// which applies to leaf weights of the individual trees in the forest. A high +// lambda value causes training to favor small leaf weights. This behavior makes +// the prediction function smoother at the expense of potentially not being able +// to capture relevant relationships between the features and the dependent +// variable. A small lambda value results in large individual trees and slower +// training. By default, this value is calculated during hyperparameter +// optimization. It must be a nonnegative value. +func (s *_dataframeAnalysisClassification) Lambda(lambda types.Float64) *_dataframeAnalysisClassification { + + s.v.Lambda = &lambda + + return s +} + +// Advanced configuration option. A multiplier responsible for determining the +// maximum number of hyperparameter optimization steps in the Bayesian +// optimization procedure. 
The maximum number of steps is determined based on +// the number of undefined hyperparameters times the maximum optimization rounds +// per hyperparameter. By default, this value is calculated during +// hyperparameter optimization. +func (s *_dataframeAnalysisClassification) MaxOptimizationRoundsPerHyperparameter(maxoptimizationroundsperhyperparameter int) *_dataframeAnalysisClassification { + + s.v.MaxOptimizationRoundsPerHyperparameter = &maxoptimizationroundsperhyperparameter + + return s +} + +// Advanced configuration option. Defines the maximum number of decision trees +// in the forest. The maximum value is 2000. By default, this value is +// calculated during hyperparameter optimization. +func (s *_dataframeAnalysisClassification) MaxTrees(maxtrees int) *_dataframeAnalysisClassification { + + s.v.MaxTrees = &maxtrees + + return s +} + +// Defines the number of categories for which the predicted probabilities are +// reported. It must be non-negative or -1. If it is -1 or greater than the +// total number of categories, probabilities are reported for all categories; if +// you have a large number of categories, there could be a significant effect on +// the size of your destination index. NOTE: To use the AUC ROC evaluation +// method, `num_top_classes` must be set to -1 or a value greater than or equal +// to the total number of categories. +func (s *_dataframeAnalysisClassification) NumTopClasses(numtopclasses int) *_dataframeAnalysisClassification { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// Advanced configuration option. Specifies the maximum number of feature +// importance values per document to return. By default, no feature importance +// calculation occurs. 
+func (s *_dataframeAnalysisClassification) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_dataframeAnalysisClassification { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// Defines the name of the prediction field in the results. Defaults to +// `_prediction`. +func (s *_dataframeAnalysisClassification) PredictionFieldName(field string) *_dataframeAnalysisClassification { + + s.v.PredictionFieldName = &field + + return s +} + +// Defines the seed for the random generator that is used to pick training data. +// By default, it is randomly generated. Set it to a specific value to use the +// same training data each time you start a job (assuming other related +// parameters such as `source` and `analyzed_fields` are the same). +func (s *_dataframeAnalysisClassification) RandomizeSeed(randomizeseed types.Float64) *_dataframeAnalysisClassification { + + s.v.RandomizeSeed = &randomizeseed + + return s +} + +// Advanced configuration option. Machine learning uses loss guided tree +// growing, which means that the decision trees grow where the regularized loss +// decreases most quickly. This soft limit combines with the +// `soft_tree_depth_tolerance` to penalize trees that exceed the specified +// depth; the regularized loss increases quickly beyond this depth. By default, +// this value is calculated during hyperparameter optimization. It must be +// greater than or equal to 0. +func (s *_dataframeAnalysisClassification) SoftTreeDepthLimit(softtreedepthlimit int) *_dataframeAnalysisClassification { + + s.v.SoftTreeDepthLimit = &softtreedepthlimit + + return s +} + +// Advanced configuration option. This option controls how quickly the +// regularized loss increases when the tree depth exceeds +// `soft_tree_depth_limit`. By default, this value is calculated during +// hyperparameter optimization. It must be greater than or equal to 0.01. 
+func (s *_dataframeAnalysisClassification) SoftTreeDepthTolerance(softtreedepthtolerance types.Float64) *_dataframeAnalysisClassification { + + s.v.SoftTreeDepthTolerance = &softtreedepthtolerance + + return s +} + +// Defines what percentage of the eligible documents that will be used for +// training. Documents that are ignored by the analysis (for example those that +// contain arrays with more than one value) won’t be included in the calculation +// for used percentage. +func (s *_dataframeAnalysisClassification) TrainingPercent(percentage types.PercentageVariant) *_dataframeAnalysisClassification { + + s.v.TrainingPercent = *percentage.PercentageCaster() + + return s +} + +func (s *_dataframeAnalysisClassification) DataframeAnalysisContainerCaster() *types.DataframeAnalysisContainer { + container := types.NewDataframeAnalysisContainer() + + container.Classification = s.v + + return container +} + +func (s *_dataframeAnalysisClassification) DataframeAnalysisClassificationCaster() *types.DataframeAnalysisClassification { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysiscontainer.go b/typedapi/esdsl/dataframeanalysiscontainer.go new file mode 100644 index 0000000000..7e40bef7e5 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysiscontainer.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _dataframeAnalysisContainer struct { + v *types.DataframeAnalysisContainer +} + +func NewDataframeAnalysisContainer() *_dataframeAnalysisContainer { + return &_dataframeAnalysisContainer{v: types.NewDataframeAnalysisContainer()} +} + +// AdditionalDataframeAnalysisContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_dataframeAnalysisContainer) AdditionalDataframeAnalysisContainerProperty(key string, value json.RawMessage) *_dataframeAnalysisContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalDataframeAnalysisContainerProperty = tmp + return s +} + +// The configuration information necessary to perform classification. +func (s *_dataframeAnalysisContainer) Classification(classification types.DataframeAnalysisClassificationVariant) *_dataframeAnalysisContainer { + + s.v.Classification = classification.DataframeAnalysisClassificationCaster() + + return s +} + +// The configuration information necessary to perform outlier detection. NOTE: +// Advanced parameters are for fine-tuning classification analysis. They are set +// automatically by hyperparameter optimization to give the minimum validation +// error. It is highly recommended to use the default values unless you fully +// understand the function of these parameters. 
+func (s *_dataframeAnalysisContainer) OutlierDetection(outlierdetection types.DataframeAnalysisOutlierDetectionVariant) *_dataframeAnalysisContainer { + + s.v.OutlierDetection = outlierdetection.DataframeAnalysisOutlierDetectionCaster() + + return s +} + +// The configuration information necessary to perform regression. NOTE: Advanced +// parameters are for fine-tuning regression analysis. They are set +// automatically by hyperparameter optimization to give the minimum validation +// error. It is highly recommended to use the default values unless you fully +// understand the function of these parameters. +func (s *_dataframeAnalysisContainer) Regression(regression types.DataframeAnalysisRegressionVariant) *_dataframeAnalysisContainer { + + s.v.Regression = regression.DataframeAnalysisRegressionCaster() + + return s +} + +func (s *_dataframeAnalysisContainer) DataframeAnalysisContainerCaster() *types.DataframeAnalysisContainer { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessor.go b/typedapi/esdsl/dataframeanalysisfeatureprocessor.go new file mode 100644 index 0000000000..56223b0188 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessor.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _dataframeAnalysisFeatureProcessor struct { + v *types.DataframeAnalysisFeatureProcessor +} + +func NewDataframeAnalysisFeatureProcessor() *_dataframeAnalysisFeatureProcessor { + return &_dataframeAnalysisFeatureProcessor{v: types.NewDataframeAnalysisFeatureProcessor()} +} + +// AdditionalDataframeAnalysisFeatureProcessorProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_dataframeAnalysisFeatureProcessor) AdditionalDataframeAnalysisFeatureProcessorProperty(key string, value json.RawMessage) *_dataframeAnalysisFeatureProcessor { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalDataframeAnalysisFeatureProcessorProperty = tmp + return s +} + +// The configuration information necessary to perform frequency encoding. +func (s *_dataframeAnalysisFeatureProcessor) FrequencyEncoding(frequencyencoding types.DataframeAnalysisFeatureProcessorFrequencyEncodingVariant) *_dataframeAnalysisFeatureProcessor { + + s.v.FrequencyEncoding = frequencyencoding.DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() + + return s +} + +// The configuration information necessary to perform multi encoding. It allows +// multiple processors to be changed together. This way the output of a +// processor can then be passed to another as an input. 
+func (s *_dataframeAnalysisFeatureProcessor) MultiEncoding(multiencoding types.DataframeAnalysisFeatureProcessorMultiEncodingVariant) *_dataframeAnalysisFeatureProcessor { + + s.v.MultiEncoding = multiencoding.DataframeAnalysisFeatureProcessorMultiEncodingCaster() + + return s +} + +// The configuration information necessary to perform n-gram encoding. Features +// created by this encoder have the following name format: +// .. For example, if the feature_prefix +// is f, the feature name for the second unigram in a string is f.11. +func (s *_dataframeAnalysisFeatureProcessor) NGramEncoding(ngramencoding types.DataframeAnalysisFeatureProcessorNGramEncodingVariant) *_dataframeAnalysisFeatureProcessor { + + s.v.NGramEncoding = ngramencoding.DataframeAnalysisFeatureProcessorNGramEncodingCaster() + + return s +} + +// The configuration information necessary to perform one hot encoding. +func (s *_dataframeAnalysisFeatureProcessor) OneHotEncoding(onehotencoding types.DataframeAnalysisFeatureProcessorOneHotEncodingVariant) *_dataframeAnalysisFeatureProcessor { + + s.v.OneHotEncoding = onehotencoding.DataframeAnalysisFeatureProcessorOneHotEncodingCaster() + + return s +} + +// The configuration information necessary to perform target mean encoding. 
+func (s *_dataframeAnalysisFeatureProcessor) TargetMeanEncoding(targetmeanencoding types.DataframeAnalysisFeatureProcessorTargetMeanEncodingVariant) *_dataframeAnalysisFeatureProcessor { + + s.v.TargetMeanEncoding = targetmeanencoding.DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() + + return s +} + +func (s *_dataframeAnalysisFeatureProcessor) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessorfrequencyencoding.go b/typedapi/esdsl/dataframeanalysisfeatureprocessorfrequencyencoding.go new file mode 100644 index 0000000000..ca1fa0a33a --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessorfrequencyencoding.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisFeatureProcessorFrequencyEncoding struct { + v *types.DataframeAnalysisFeatureProcessorFrequencyEncoding +} + +// The configuration information necessary to perform frequency encoding. +func NewDataframeAnalysisFeatureProcessorFrequencyEncoding() *_dataframeAnalysisFeatureProcessorFrequencyEncoding { + + return &_dataframeAnalysisFeatureProcessorFrequencyEncoding{v: types.NewDataframeAnalysisFeatureProcessorFrequencyEncoding()} + +} + +// The resulting feature name. +func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) FeatureName(name string) *_dataframeAnalysisFeatureProcessorFrequencyEncoding { + + s.v.FeatureName = name + + return s +} + +func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) Field(field string) *_dataframeAnalysisFeatureProcessorFrequencyEncoding { + + s.v.Field = field + + return s +} + +// The resulting frequency map for the field value. If the field value is +// missing from the frequency_map, the resulting value is 0. 
+func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) FrequencyMap(frequencymap map[string]types.Float64) *_dataframeAnalysisFeatureProcessorFrequencyEncoding { + + s.v.FrequencyMap = frequencymap + return s +} + +func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) AddFrequencyMap(key string, value types.Float64) *_dataframeAnalysisFeatureProcessorFrequencyEncoding { + + var tmp map[string]types.Float64 + if s.v.FrequencyMap == nil { + s.v.FrequencyMap = make(map[string]types.Float64) + } else { + tmp = s.v.FrequencyMap + } + + tmp[key] = value + + s.v.FrequencyMap = tmp + return s +} + +func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + container := types.NewDataframeAnalysisFeatureProcessor() + + container.FrequencyEncoding = s.v + + return container +} + +func (s *_dataframeAnalysisFeatureProcessorFrequencyEncoding) DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *types.DataframeAnalysisFeatureProcessorFrequencyEncoding { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessormultiencoding.go b/typedapi/esdsl/dataframeanalysisfeatureprocessormultiencoding.go new file mode 100644 index 0000000000..f112acda05 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessormultiencoding.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisFeatureProcessorMultiEncoding struct { + v *types.DataframeAnalysisFeatureProcessorMultiEncoding +} + +// The configuration information necessary to perform multi encoding. It allows +// multiple processors to be changed together. This way the output of a +// processor can then be passed to another as an input. +func NewDataframeAnalysisFeatureProcessorMultiEncoding() *_dataframeAnalysisFeatureProcessorMultiEncoding { + + return &_dataframeAnalysisFeatureProcessorMultiEncoding{v: types.NewDataframeAnalysisFeatureProcessorMultiEncoding()} + +} + +// The ordered array of custom processors to execute. Must be more than 1. 
+func (s *_dataframeAnalysisFeatureProcessorMultiEncoding) Processors(processors ...int) *_dataframeAnalysisFeatureProcessorMultiEncoding { + + for _, v := range processors { + + s.v.Processors = append(s.v.Processors, v) + + } + return s +} + +func (s *_dataframeAnalysisFeatureProcessorMultiEncoding) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + container := types.NewDataframeAnalysisFeatureProcessor() + + container.MultiEncoding = s.v + + return container +} + +func (s *_dataframeAnalysisFeatureProcessorMultiEncoding) DataframeAnalysisFeatureProcessorMultiEncodingCaster() *types.DataframeAnalysisFeatureProcessorMultiEncoding { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessorngramencoding.go b/typedapi/esdsl/dataframeanalysisfeatureprocessorngramencoding.go new file mode 100644 index 0000000000..75daea82c8 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessorngramencoding.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisFeatureProcessorNGramEncoding struct { + v *types.DataframeAnalysisFeatureProcessorNGramEncoding +} + +// The configuration information necessary to perform n-gram encoding. Features +// created by this encoder have the following name format: +// .. For example, if the feature_prefix +// is f, the feature name for the second unigram in a string is f.11. +func NewDataframeAnalysisFeatureProcessorNGramEncoding() *_dataframeAnalysisFeatureProcessorNGramEncoding { + + return &_dataframeAnalysisFeatureProcessorNGramEncoding{v: types.NewDataframeAnalysisFeatureProcessorNGramEncoding()} + +} + +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) Custom(custom bool) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + s.v.Custom = &custom + + return s +} + +// The feature name prefix. Defaults to ngram__. +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) FeaturePrefix(featureprefix string) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + s.v.FeaturePrefix = &featureprefix + + return s +} + +// The name of the text field to encode. +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) Field(field string) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + s.v.Field = field + + return s +} + +// Specifies the length of the n-gram substring. Defaults to 50. Must be greater +// than 0. +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) Length(length int) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + s.v.Length = &length + + return s +} + +// Specifies which n-grams to gather. It’s an array of integer values where the +// minimum value is 1, and a maximum value is 5. 
+func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) NGrams(ngrams ...int) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + for _, v := range ngrams { + + s.v.NGrams = append(s.v.NGrams, v) + + } + return s +} + +// Specifies the zero-indexed start of the n-gram substring. Negative values are +// allowed for encoding n-grams of string suffixes. Defaults to 0. +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) Start(start int) *_dataframeAnalysisFeatureProcessorNGramEncoding { + + s.v.Start = &start + + return s +} + +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + container := types.NewDataframeAnalysisFeatureProcessor() + + container.NGramEncoding = s.v + + return container +} + +func (s *_dataframeAnalysisFeatureProcessorNGramEncoding) DataframeAnalysisFeatureProcessorNGramEncodingCaster() *types.DataframeAnalysisFeatureProcessorNGramEncoding { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessoronehotencoding.go b/typedapi/esdsl/dataframeanalysisfeatureprocessoronehotencoding.go new file mode 100644 index 0000000000..559087ba63 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessoronehotencoding.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisFeatureProcessorOneHotEncoding struct { + v *types.DataframeAnalysisFeatureProcessorOneHotEncoding +} + +// The configuration information necessary to perform one hot encoding. +func NewDataframeAnalysisFeatureProcessorOneHotEncoding(hotmap string) *_dataframeAnalysisFeatureProcessorOneHotEncoding { + + tmp := &_dataframeAnalysisFeatureProcessorOneHotEncoding{v: types.NewDataframeAnalysisFeatureProcessorOneHotEncoding()} + + tmp.HotMap(hotmap) + + return tmp + +} + +// The name of the field to encode. +func (s *_dataframeAnalysisFeatureProcessorOneHotEncoding) Field(field string) *_dataframeAnalysisFeatureProcessorOneHotEncoding { + + s.v.Field = field + + return s +} + +// The one hot map mapping the field value with the column name. 
+func (s *_dataframeAnalysisFeatureProcessorOneHotEncoding) HotMap(hotmap string) *_dataframeAnalysisFeatureProcessorOneHotEncoding { + + s.v.HotMap = hotmap + + return s +} + +func (s *_dataframeAnalysisFeatureProcessorOneHotEncoding) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + container := types.NewDataframeAnalysisFeatureProcessor() + + container.OneHotEncoding = s.v + + return container +} + +func (s *_dataframeAnalysisFeatureProcessorOneHotEncoding) DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *types.DataframeAnalysisFeatureProcessorOneHotEncoding { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisfeatureprocessortargetmeanencoding.go b/typedapi/esdsl/dataframeanalysisfeatureprocessortargetmeanencoding.go new file mode 100644 index 0000000000..c8e4cda010 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisfeatureprocessortargetmeanencoding.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _dataframeAnalysisFeatureProcessorTargetMeanEncoding struct { + v *types.DataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +// The configuration information necessary to perform target mean encoding. +func NewDataframeAnalysisFeatureProcessorTargetMeanEncoding(defaultvalue int) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + tmp := &_dataframeAnalysisFeatureProcessorTargetMeanEncoding{v: types.NewDataframeAnalysisFeatureProcessorTargetMeanEncoding()} + + tmp.DefaultValue(defaultvalue) + + return tmp + +} + +// The default value if field value is not found in the target_map. +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) DefaultValue(defaultvalue int) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + s.v.DefaultValue = defaultvalue + + return s +} + +// The resulting feature name. +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) FeatureName(name string) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + s.v.FeatureName = name + + return s +} + +// The name of the field to encode. +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) Field(field string) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + s.v.Field = field + + return s +} + +// The field value to target mean transition map. 
+func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) TargetMap(targetmap map[string]json.RawMessage) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + s.v.TargetMap = targetmap + return s +} + +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) AddTargetMap(key string, value json.RawMessage) *_dataframeAnalysisFeatureProcessorTargetMeanEncoding { + + var tmp map[string]json.RawMessage + if s.v.TargetMap == nil { + s.v.TargetMap = make(map[string]json.RawMessage) + } else { + tmp = s.v.TargetMap + } + + tmp[key] = value + + s.v.TargetMap = tmp + return s +} + +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) DataframeAnalysisFeatureProcessorCaster() *types.DataframeAnalysisFeatureProcessor { + container := types.NewDataframeAnalysisFeatureProcessor() + + container.TargetMeanEncoding = s.v + + return container +} + +func (s *_dataframeAnalysisFeatureProcessorTargetMeanEncoding) DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *types.DataframeAnalysisFeatureProcessorTargetMeanEncoding { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisoutlierdetection.go b/typedapi/esdsl/dataframeanalysisoutlierdetection.go new file mode 100644 index 0000000000..d7a1f1ec45 --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisoutlierdetection.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisOutlierDetection struct { + v *types.DataframeAnalysisOutlierDetection +} + +// The configuration information necessary to perform outlier detection. NOTE: +// Advanced parameters are for fine-tuning classification analysis. They are set +// automatically by hyperparameter optimization to give the minimum validation +// error. It is highly recommended to use the default values unless you fully +// understand the function of these parameters. +func NewDataframeAnalysisOutlierDetection() *_dataframeAnalysisOutlierDetection { + + return &_dataframeAnalysisOutlierDetection{v: types.NewDataframeAnalysisOutlierDetection()} + +} + +// Specifies whether the feature influence calculation is enabled. +func (s *_dataframeAnalysisOutlierDetection) ComputeFeatureInfluence(computefeatureinfluence bool) *_dataframeAnalysisOutlierDetection { + + s.v.ComputeFeatureInfluence = &computefeatureinfluence + + return s +} + +// The minimum outlier score that a document needs to have in order to calculate +// its feature influence score. Value range: 0-1. 
+func (s *_dataframeAnalysisOutlierDetection) FeatureInfluenceThreshold(featureinfluencethreshold types.Float64) *_dataframeAnalysisOutlierDetection { + + s.v.FeatureInfluenceThreshold = &featureinfluencethreshold + + return s +} + +// The method that outlier detection uses. Available methods are `lof`, `ldof`, +// `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is +// ensemble, which means that outlier detection uses an ensemble of different +// methods and normalises and combines their individual outlier scores to obtain +// the overall outlier score. +func (s *_dataframeAnalysisOutlierDetection) Method(method string) *_dataframeAnalysisOutlierDetection { + + s.v.Method = &method + + return s +} + +// Defines the value for how many nearest neighbors each method of outlier +// detection uses to calculate its outlier score. When the value is not set, +// different values are used for different ensemble members. This default +// behavior helps improve the diversity in the ensemble; only override it if you +// are confident that the value you choose is appropriate for the data set. +func (s *_dataframeAnalysisOutlierDetection) NNeighbors(nneighbors int) *_dataframeAnalysisOutlierDetection { + + s.v.NNeighbors = &nneighbors + + return s +} + +// The proportion of the data set that is assumed to be outlying prior to +// outlier detection. For example, 0.05 means it is assumed that 5% of values +// are real outliers and 95% are inliers. +func (s *_dataframeAnalysisOutlierDetection) OutlierFraction(outlierfraction types.Float64) *_dataframeAnalysisOutlierDetection { + + s.v.OutlierFraction = &outlierfraction + + return s +} + +// If true, the following operation is performed on the columns before computing +// outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. 
+func (s *_dataframeAnalysisOutlierDetection) StandardizationEnabled(standardizationenabled bool) *_dataframeAnalysisOutlierDetection { + + s.v.StandardizationEnabled = &standardizationenabled + + return s +} + +func (s *_dataframeAnalysisOutlierDetection) DataframeAnalysisContainerCaster() *types.DataframeAnalysisContainer { + container := types.NewDataframeAnalysisContainer() + + container.OutlierDetection = s.v + + return container +} + +func (s *_dataframeAnalysisOutlierDetection) DataframeAnalysisOutlierDetectionCaster() *types.DataframeAnalysisOutlierDetection { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalysisregression.go b/typedapi/esdsl/dataframeanalysisregression.go new file mode 100644 index 0000000000..83c95f403a --- /dev/null +++ b/typedapi/esdsl/dataframeanalysisregression.go @@ -0,0 +1,296 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalysisRegression struct { + v *types.DataframeAnalysisRegression +} + +// The configuration information necessary to perform regression. NOTE: Advanced +// parameters are for fine-tuning regression analysis. They are set +// automatically by hyperparameter optimization to give the minimum validation +// error. It is highly recommended to use the default values unless you fully +// understand the function of these parameters. +func NewDataframeAnalysisRegression(dependentvariable string) *_dataframeAnalysisRegression { + + tmp := &_dataframeAnalysisRegression{v: types.NewDataframeAnalysisRegression()} + + tmp.DependentVariable(dependentvariable) + + return tmp + +} + +// Advanced configuration option. Machine learning uses loss guided tree +// growing, which means that the decision trees grow where the regularized loss +// decreases most quickly. This parameter affects loss calculations by acting as +// a multiplier of the tree depth. Higher alpha values result in shallower trees +// and faster training times. By default, this value is calculated during +// hyperparameter optimization. It must be greater than or equal to zero. +func (s *_dataframeAnalysisRegression) Alpha(alpha types.Float64) *_dataframeAnalysisRegression { + + s.v.Alpha = &alpha + + return s +} + +// Defines which field of the document is to be predicted. It must match one of +// the fields in the index being used to train. If this field is missing from a +// document, then that document will not be used for training, but a prediction +// with the trained model will be generated for it. It is also known as +// continuous target variable. 
+// For classification analysis, the data type of the field must be numeric +// (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or +// `boolean`. There must be no more than 30 different values in this field. +// For regression analysis, the data type of the field must be numeric. +func (s *_dataframeAnalysisRegression) DependentVariable(dependentvariable string) *_dataframeAnalysisRegression { + + s.v.DependentVariable = dependentvariable + + return s +} + +// Advanced configuration option. Controls the fraction of data that is used to +// compute the derivatives of the loss function for tree training. A small value +// results in the use of a small fraction of the data. If this value is set to +// be less than 1, accuracy typically improves. However, too small a value may +// result in poor convergence for the ensemble and so require more trees. By +// default, this value is calculated during hyperparameter optimization. It must +// be greater than zero and less than or equal to 1. +func (s *_dataframeAnalysisRegression) DownsampleFactor(downsamplefactor types.Float64) *_dataframeAnalysisRegression { + + s.v.DownsampleFactor = &downsamplefactor + + return s +} + +// Advanced configuration option. Specifies whether the training process should +// finish if it is not finding any better performing models. If disabled, the +// training process can take significantly longer and the chance of finding a +// better performing model is unremarkable. +func (s *_dataframeAnalysisRegression) EarlyStoppingEnabled(earlystoppingenabled bool) *_dataframeAnalysisRegression { + + s.v.EarlyStoppingEnabled = &earlystoppingenabled + + return s +} + +// Advanced configuration option. The shrinkage applied to the weights. Smaller +// values result in larger forests which have a better generalization error. +// However, larger forests cause slower training. By default, this value is +// calculated during hyperparameter optimization. 
It must be a value between +// 0.001 and 1. +func (s *_dataframeAnalysisRegression) Eta(eta types.Float64) *_dataframeAnalysisRegression { + + s.v.Eta = &eta + + return s +} + +// Advanced configuration option. Specifies the rate at which `eta` increases +// for each new tree that is added to the forest. For example, a rate of 1.05 +// increases `eta` by 5% for each extra tree. By default, this value is +// calculated during hyperparameter optimization. It must be between 0.5 and 2. +func (s *_dataframeAnalysisRegression) EtaGrowthRatePerTree(etagrowthratepertree types.Float64) *_dataframeAnalysisRegression { + + s.v.EtaGrowthRatePerTree = &etagrowthratepertree + + return s +} + +// Advanced configuration option. Defines the fraction of features that will be +// used when selecting a random bag for each candidate split. By default, this +// value is calculated during hyperparameter optimization. +func (s *_dataframeAnalysisRegression) FeatureBagFraction(featurebagfraction types.Float64) *_dataframeAnalysisRegression { + + s.v.FeatureBagFraction = &featurebagfraction + + return s +} + +// Advanced configuration option. A collection of feature preprocessors that +// modify one or more included fields. The analysis uses the resulting one or +// more features instead of the original document field. However, these features +// are ephemeral; they are not stored in the destination index. Multiple +// `feature_processors` entries can refer to the same document fields. Automatic +// categorical feature encoding still occurs for the fields that are unprocessed +// by a custom processor or that have categorical values. Use this property only +// if you want to override the automatic feature encoding of the specified +// fields. 
+func (s *_dataframeAnalysisRegression) FeatureProcessors(featureprocessors ...types.DataframeAnalysisFeatureProcessorVariant) *_dataframeAnalysisRegression { + + for _, v := range featureprocessors { + + s.v.FeatureProcessors = append(s.v.FeatureProcessors, *v.DataframeAnalysisFeatureProcessorCaster()) + + } + return s +} + +// Advanced configuration option. Regularization parameter to prevent +// overfitting on the training data set. Multiplies a linear penalty associated +// with the size of individual trees in the forest. A high gamma value causes +// training to prefer small trees. A small gamma value results in larger +// individual trees and slower training. By default, this value is calculated +// during hyperparameter optimization. It must be a nonnegative value. +func (s *_dataframeAnalysisRegression) Gamma(gamma types.Float64) *_dataframeAnalysisRegression { + + s.v.Gamma = &gamma + + return s +} + +// Advanced configuration option. Regularization parameter to prevent +// overfitting on the training data set. Multiplies an L2 regularization term +// which applies to leaf weights of the individual trees in the forest. A high +// lambda value causes training to favor small leaf weights. This behavior makes +// the prediction function smoother at the expense of potentially not being able +// to capture relevant relationships between the features and the dependent +// variable. A small lambda value results in large individual trees and slower +// training. By default, this value is calculated during hyperparameter +// optimization. It must be a nonnegative value. +func (s *_dataframeAnalysisRegression) Lambda(lambda types.Float64) *_dataframeAnalysisRegression { + + s.v.Lambda = &lambda + + return s +} + +// The loss function used during regression. Available options are `mse` (mean +// squared error), `msle` (mean squared logarithmic error), `huber` +// (Pseudo-Huber loss). 
+func (s *_dataframeAnalysisRegression) LossFunction(lossfunction string) *_dataframeAnalysisRegression { + + s.v.LossFunction = &lossfunction + + return s +} + +// A positive number that is used as a parameter to the `loss_function`. +func (s *_dataframeAnalysisRegression) LossFunctionParameter(lossfunctionparameter types.Float64) *_dataframeAnalysisRegression { + + s.v.LossFunctionParameter = &lossfunctionparameter + + return s +} + +// Advanced configuration option. A multiplier responsible for determining the +// maximum number of hyperparameter optimization steps in the Bayesian +// optimization procedure. The maximum number of steps is determined based on +// the number of undefined hyperparameters times the maximum optimization rounds +// per hyperparameter. By default, this value is calculated during +// hyperparameter optimization. +func (s *_dataframeAnalysisRegression) MaxOptimizationRoundsPerHyperparameter(maxoptimizationroundsperhyperparameter int) *_dataframeAnalysisRegression { + + s.v.MaxOptimizationRoundsPerHyperparameter = &maxoptimizationroundsperhyperparameter + + return s +} + +// Advanced configuration option. Defines the maximum number of decision trees +// in the forest. The maximum value is 2000. By default, this value is +// calculated during hyperparameter optimization. +func (s *_dataframeAnalysisRegression) MaxTrees(maxtrees int) *_dataframeAnalysisRegression { + + s.v.MaxTrees = &maxtrees + + return s +} + +// Advanced configuration option. Specifies the maximum number of feature +// importance values per document to return. By default, no feature importance +// calculation occurs. +func (s *_dataframeAnalysisRegression) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_dataframeAnalysisRegression { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// Defines the name of the prediction field in the results. Defaults to +// `_prediction`. 
+func (s *_dataframeAnalysisRegression) PredictionFieldName(field string) *_dataframeAnalysisRegression { + + s.v.PredictionFieldName = &field + + return s +} + +// Defines the seed for the random generator that is used to pick training data. +// By default, it is randomly generated. Set it to a specific value to use the +// same training data each time you start a job (assuming other related +// parameters such as `source` and `analyzed_fields` are the same). +func (s *_dataframeAnalysisRegression) RandomizeSeed(randomizeseed types.Float64) *_dataframeAnalysisRegression { + + s.v.RandomizeSeed = &randomizeseed + + return s +} + +// Advanced configuration option. Machine learning uses loss guided tree +// growing, which means that the decision trees grow where the regularized loss +// decreases most quickly. This soft limit combines with the +// `soft_tree_depth_tolerance` to penalize trees that exceed the specified +// depth; the regularized loss increases quickly beyond this depth. By default, +// this value is calculated during hyperparameter optimization. It must be +// greater than or equal to 0. +func (s *_dataframeAnalysisRegression) SoftTreeDepthLimit(softtreedepthlimit int) *_dataframeAnalysisRegression { + + s.v.SoftTreeDepthLimit = &softtreedepthlimit + + return s +} + +// Advanced configuration option. This option controls how quickly the +// regularized loss increases when the tree depth exceeds +// `soft_tree_depth_limit`. By default, this value is calculated during +// hyperparameter optimization. It must be greater than or equal to 0.01. +func (s *_dataframeAnalysisRegression) SoftTreeDepthTolerance(softtreedepthtolerance types.Float64) *_dataframeAnalysisRegression { + + s.v.SoftTreeDepthTolerance = &softtreedepthtolerance + + return s +} + +// Defines what percentage of the eligible documents that will be used for +// training. 
Documents that are ignored by the analysis (for example those that +// contain arrays with more than one value) won’t be included in the calculation +// for used percentage. +func (s *_dataframeAnalysisRegression) TrainingPercent(percentage types.PercentageVariant) *_dataframeAnalysisRegression { + + s.v.TrainingPercent = *percentage.PercentageCaster() + + return s +} + +func (s *_dataframeAnalysisRegression) DataframeAnalysisContainerCaster() *types.DataframeAnalysisContainer { + container := types.NewDataframeAnalysisContainer() + + container.Regression = s.v + + return container +} + +func (s *_dataframeAnalysisRegression) DataframeAnalysisRegressionCaster() *types.DataframeAnalysisRegression { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalyticsdestination.go b/typedapi/esdsl/dataframeanalyticsdestination.go new file mode 100644 index 0000000000..7145837e9a --- /dev/null +++ b/typedapi/esdsl/dataframeanalyticsdestination.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalyticsDestination struct { + v *types.DataframeAnalyticsDestination +} + +func NewDataframeAnalyticsDestination() *_dataframeAnalyticsDestination { + + return &_dataframeAnalyticsDestination{v: types.NewDataframeAnalyticsDestination()} + +} + +// Defines the destination index to store the results of the data frame +// analytics job. +func (s *_dataframeAnalyticsDestination) Index(indexname string) *_dataframeAnalyticsDestination { + + s.v.Index = indexname + + return s +} + +// Defines the name of the field in which to store the results of the analysis. +// Defaults to `ml`. +func (s *_dataframeAnalyticsDestination) ResultsField(field string) *_dataframeAnalyticsDestination { + + s.v.ResultsField = &field + + return s +} + +func (s *_dataframeAnalyticsDestination) DataframeAnalyticsDestinationCaster() *types.DataframeAnalyticsDestination { + return s.v +} diff --git a/typedapi/esdsl/dataframeanalyticssource.go b/typedapi/esdsl/dataframeanalyticssource.go new file mode 100644 index 0000000000..5739901ff9 --- /dev/null +++ b/typedapi/esdsl/dataframeanalyticssource.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeAnalyticsSource struct { + v *types.DataframeAnalyticsSource +} + +func NewDataframeAnalyticsSource() *_dataframeAnalyticsSource { + + return &_dataframeAnalyticsSource{v: types.NewDataframeAnalyticsSource()} + +} + +// Index or indices on which to perform the analysis. It can be a single index +// or index pattern as well as an array of indices or patterns. NOTE: If your +// source indices contain documents with the same IDs, only the document that is +// indexed last appears in the destination index. +func (s *_dataframeAnalyticsSource) Index(indices ...string) *_dataframeAnalyticsSource { + + s.v.Index = indices + + return s +} + +// The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an Elasticsearch search POST body. All the +// options that are supported by Elasticsearch can be used, as this object is +// passed verbatim to Elasticsearch. By default, this property has the following +// value: {"match_all": {}}. +func (s *_dataframeAnalyticsSource) Query(query types.QueryVariant) *_dataframeAnalyticsSource { + + s.v.Query = query.QueryCaster() + + return s +} + +// Definitions of runtime fields that will become part of the mapping of the +// destination index. 
+func (s *_dataframeAnalyticsSource) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *_dataframeAnalyticsSource {
+
+	s.v.RuntimeMappings = *runtimefields.RuntimeFieldsCaster()
+
+	return s
+}
+
+// Specify `includes` and/or `excludes` patterns to select which fields will be
+// present in the destination. Fields that are excluded cannot be included in
+// the analysis.
+func (s *_dataframeAnalyticsSource) Source_(source_ types.DataframeAnalysisAnalyzedFieldsVariant) *_dataframeAnalyticsSource {
+
+	s.v.Source_ = source_.DataframeAnalysisAnalyzedFieldsCaster()
+
+	return s
+}
+
+func (s *_dataframeAnalyticsSource) DataframeAnalyticsSourceCaster() *types.DataframeAnalyticsSource {
+	return s.v
+}
diff --git a/typedapi/esdsl/dataframeevaluationclassification.go b/typedapi/esdsl/dataframeevaluationclassification.go
new file mode 100644
index 0000000000..4f27a54f55
--- /dev/null
+++ b/typedapi/esdsl/dataframeevaluationclassification.go
@@ -0,0 +1,85 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationClassification struct { + v *types.DataframeEvaluationClassification +} + +// Classification evaluation evaluates the results of a classification analysis +// which outputs a prediction that identifies to which of the classes each +// document belongs. +func NewDataframeEvaluationClassification() *_dataframeEvaluationClassification { + + return &_dataframeEvaluationClassification{v: types.NewDataframeEvaluationClassification()} + +} + +// The field of the index which contains the ground truth. The data type of this +// field can be boolean or integer. If the data type is integer, the value has +// to be either 0 (false) or 1 (true). +func (s *_dataframeEvaluationClassification) ActualField(field string) *_dataframeEvaluationClassification { + + s.v.ActualField = field + + return s +} + +// Specifies the metrics that are used for the evaluation. +func (s *_dataframeEvaluationClassification) Metrics(metrics types.DataframeEvaluationClassificationMetricsVariant) *_dataframeEvaluationClassification { + + s.v.Metrics = metrics.DataframeEvaluationClassificationMetricsCaster() + + return s +} + +// The field in the index which contains the predicted value, in other words the +// results of the classification analysis. +func (s *_dataframeEvaluationClassification) PredictedField(field string) *_dataframeEvaluationClassification { + + s.v.PredictedField = &field + + return s +} + +// The field of the index which is an array of documents of the form { +// "class_name": XXX, "class_probability": YYY }. This field must be defined as +// nested in the mappings. 
+func (s *_dataframeEvaluationClassification) TopClassesField(field string) *_dataframeEvaluationClassification { + + s.v.TopClassesField = &field + + return s +} + +func (s *_dataframeEvaluationClassification) DataframeEvaluationContainerCaster() *types.DataframeEvaluationContainer { + container := types.NewDataframeEvaluationContainer() + + container.Classification = s.v + + return container +} + +func (s *_dataframeEvaluationClassification) DataframeEvaluationClassificationCaster() *types.DataframeEvaluationClassification { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationclassificationmetrics.go b/typedapi/esdsl/dataframeevaluationclassificationmetrics.go new file mode 100644 index 0000000000..da4ef92ea7 --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationclassificationmetrics.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _dataframeEvaluationClassificationMetrics struct {
+	v *types.DataframeEvaluationClassificationMetrics
+}
+
+func NewDataframeEvaluationClassificationMetrics() *_dataframeEvaluationClassificationMetrics {
+
+	return &_dataframeEvaluationClassificationMetrics{v: types.NewDataframeEvaluationClassificationMetrics()}
+
+}
+
+// Accuracy of predictions (per-class and overall).
+func (s *_dataframeEvaluationClassificationMetrics) Accuracy(accuracy map[string]json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	s.v.Accuracy = accuracy
+	return s
+}
+
+func (s *_dataframeEvaluationClassificationMetrics) AddAccuracy(key string, value json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.Accuracy == nil {
+		s.v.Accuracy = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.Accuracy
+
+	tmp[key] = value
+
+	s.v.Accuracy = tmp
+	return s
+}
+
+// The AUC ROC (area under the curve of the receiver operating characteristic)
+// score and optionally the curve. It is calculated for a specific class
+// (provided as "class_name") treated as positive.
+func (s *_dataframeEvaluationClassificationMetrics) AucRoc(aucroc types.DataframeEvaluationClassificationMetricsAucRocVariant) *_dataframeEvaluationClassificationMetrics {
+
+	s.v.AucRoc = aucroc.DataframeEvaluationClassificationMetricsAucRocCaster()
+
+	return s
+}
+
+// Multiclass confusion matrix.
+func (s *_dataframeEvaluationClassificationMetrics) MulticlassConfusionMatrix(multiclassconfusionmatrix map[string]json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	s.v.MulticlassConfusionMatrix = multiclassconfusionmatrix
+	return s
+}
+
+func (s *_dataframeEvaluationClassificationMetrics) AddMulticlassConfusionMatrix(key string, value json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.MulticlassConfusionMatrix == nil {
+		s.v.MulticlassConfusionMatrix = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.MulticlassConfusionMatrix
+
+	tmp[key] = value
+
+	s.v.MulticlassConfusionMatrix = tmp
+	return s
+}
+
+// Precision of predictions (per-class and average).
+func (s *_dataframeEvaluationClassificationMetrics) Precision(precision map[string]json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	s.v.Precision = precision
+	return s
+}
+
+func (s *_dataframeEvaluationClassificationMetrics) AddPrecision(key string, value json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.Precision == nil {
+		s.v.Precision = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.Precision
+
+	tmp[key] = value
+
+	s.v.Precision = tmp
+	return s
+}
+
+// Recall of predictions (per-class and average).
+func (s *_dataframeEvaluationClassificationMetrics) Recall(recall map[string]json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	s.v.Recall = recall
+	return s
+}
+
+func (s *_dataframeEvaluationClassificationMetrics) AddRecall(key string, value json.RawMessage) *_dataframeEvaluationClassificationMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.Recall == nil {
+		s.v.Recall = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.Recall
+
+	tmp[key] = value
+
+	s.v.Recall = tmp
+	return s
+}
+
+func (s *_dataframeEvaluationClassificationMetrics) DataframeEvaluationClassificationMetricsCaster() *types.DataframeEvaluationClassificationMetrics {
+	return s.v
+}
diff --git a/typedapi/esdsl/dataframeevaluationclassificationmetricsaucroc.go b/typedapi/esdsl/dataframeevaluationclassificationmetricsaucroc.go
new file mode 100644
index 0000000000..4768232af2
--- /dev/null
+++ b/typedapi/esdsl/dataframeevaluationclassificationmetricsaucroc.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationClassificationMetricsAucRoc struct { + v *types.DataframeEvaluationClassificationMetricsAucRoc +} + +func NewDataframeEvaluationClassificationMetricsAucRoc() *_dataframeEvaluationClassificationMetricsAucRoc { + + return &_dataframeEvaluationClassificationMetricsAucRoc{v: types.NewDataframeEvaluationClassificationMetricsAucRoc()} + +} + +// Name of the only class that is treated as positive during AUC ROC +// calculation. Other classes are treated as negative ("one-vs-all" strategy). +// All the evaluated documents must have class_name in the list of their top +// classes. +func (s *_dataframeEvaluationClassificationMetricsAucRoc) ClassName(name string) *_dataframeEvaluationClassificationMetricsAucRoc { + + s.v.ClassName = &name + + return s +} + +// Whether or not the curve should be returned in addition to the score. Default +// value is false. +func (s *_dataframeEvaluationClassificationMetricsAucRoc) IncludeCurve(includecurve bool) *_dataframeEvaluationClassificationMetricsAucRoc { + + s.v.IncludeCurve = &includecurve + + return s +} + +func (s *_dataframeEvaluationClassificationMetricsAucRoc) DataframeEvaluationClassificationMetricsAucRocCaster() *types.DataframeEvaluationClassificationMetricsAucRoc { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationcontainer.go b/typedapi/esdsl/dataframeevaluationcontainer.go new file mode 100644 index 0000000000..7636664d34 --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationcontainer.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _dataframeEvaluationContainer struct {
+	v *types.DataframeEvaluationContainer
+}
+
+func NewDataframeEvaluationContainer() *_dataframeEvaluationContainer {
+	return &_dataframeEvaluationContainer{v: types.NewDataframeEvaluationContainer()}
+}
+
+// AdditionalDataframeEvaluationContainerProperty is a single key dictionary.
+// It will replace the current value on each call.
+func (s *_dataframeEvaluationContainer) AdditionalDataframeEvaluationContainerProperty(key string, value json.RawMessage) *_dataframeEvaluationContainer {
+
+	tmp := make(map[string]json.RawMessage)
+
+	tmp[key] = value
+
+	s.v.AdditionalDataframeEvaluationContainerProperty = tmp
+	return s
+}
+
+// Classification evaluation evaluates the results of a classification analysis
+// which outputs a prediction that identifies to which of the classes each
+// document belongs.
+func (s *_dataframeEvaluationContainer) Classification(classification types.DataframeEvaluationClassificationVariant) *_dataframeEvaluationContainer { + + s.v.Classification = classification.DataframeEvaluationClassificationCaster() + + return s +} + +// Outlier detection evaluates the results of an outlier detection analysis +// which outputs the probability that each document is an outlier. +func (s *_dataframeEvaluationContainer) OutlierDetection(outlierdetection types.DataframeEvaluationOutlierDetectionVariant) *_dataframeEvaluationContainer { + + s.v.OutlierDetection = outlierdetection.DataframeEvaluationOutlierDetectionCaster() + + return s +} + +// Regression evaluation evaluates the results of a regression analysis which +// outputs a prediction of values. +func (s *_dataframeEvaluationContainer) Regression(regression types.DataframeEvaluationRegressionVariant) *_dataframeEvaluationContainer { + + s.v.Regression = regression.DataframeEvaluationRegressionCaster() + + return s +} + +func (s *_dataframeEvaluationContainer) DataframeEvaluationContainerCaster() *types.DataframeEvaluationContainer { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationoutlierdetection.go b/typedapi/esdsl/dataframeevaluationoutlierdetection.go new file mode 100644 index 0000000000..e34d4b9fd4 --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationoutlierdetection.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationOutlierDetection struct { + v *types.DataframeEvaluationOutlierDetection +} + +// Outlier detection evaluates the results of an outlier detection analysis +// which outputs the probability that each document is an outlier. +func NewDataframeEvaluationOutlierDetection() *_dataframeEvaluationOutlierDetection { + + return &_dataframeEvaluationOutlierDetection{v: types.NewDataframeEvaluationOutlierDetection()} + +} + +// The field of the index which contains the ground truth. The data type of this +// field can be boolean or integer. If the data type is integer, the value has +// to be either 0 (false) or 1 (true). +func (s *_dataframeEvaluationOutlierDetection) ActualField(field string) *_dataframeEvaluationOutlierDetection { + + s.v.ActualField = field + + return s +} + +// Specifies the metrics that are used for the evaluation. +func (s *_dataframeEvaluationOutlierDetection) Metrics(metrics types.DataframeEvaluationOutlierDetectionMetricsVariant) *_dataframeEvaluationOutlierDetection { + + s.v.Metrics = metrics.DataframeEvaluationOutlierDetectionMetricsCaster() + + return s +} + +// The field of the index that defines the probability of whether the item +// belongs to the class in question or not. It’s the field that contains the +// results of the analysis. 
+func (s *_dataframeEvaluationOutlierDetection) PredictedProbabilityField(field string) *_dataframeEvaluationOutlierDetection { + + s.v.PredictedProbabilityField = field + + return s +} + +func (s *_dataframeEvaluationOutlierDetection) DataframeEvaluationContainerCaster() *types.DataframeEvaluationContainer { + container := types.NewDataframeEvaluationContainer() + + container.OutlierDetection = s.v + + return container +} + +func (s *_dataframeEvaluationOutlierDetection) DataframeEvaluationOutlierDetectionCaster() *types.DataframeEvaluationOutlierDetection { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationoutlierdetectionmetrics.go b/typedapi/esdsl/dataframeevaluationoutlierdetectionmetrics.go new file mode 100644 index 0000000000..051d1b5dbf --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationoutlierdetectionmetrics.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _dataframeEvaluationOutlierDetectionMetrics struct {
+	v *types.DataframeEvaluationOutlierDetectionMetrics
+}
+
+func NewDataframeEvaluationOutlierDetectionMetrics() *_dataframeEvaluationOutlierDetectionMetrics {
+
+	return &_dataframeEvaluationOutlierDetectionMetrics{v: types.NewDataframeEvaluationOutlierDetectionMetrics()}
+
+}
+
+// The AUC ROC (area under the curve of the receiver operating characteristic)
+// score and optionally the curve. It is calculated for a specific class
+// (provided as "class_name") treated as positive.
+func (s *_dataframeEvaluationOutlierDetectionMetrics) AucRoc(aucroc types.DataframeEvaluationClassificationMetricsAucRocVariant) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	s.v.AucRoc = aucroc.DataframeEvaluationClassificationMetricsAucRocCaster()
+
+	return s
+}
+
+// Thresholds of the outlier score at which the confusion matrix is calculated.
+func (s *_dataframeEvaluationOutlierDetectionMetrics) ConfusionMatrix(confusionmatrix map[string]json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	s.v.ConfusionMatrix = confusionmatrix
+	return s
+}
+
+func (s *_dataframeEvaluationOutlierDetectionMetrics) AddConfusionMatrix(key string, value json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.ConfusionMatrix == nil {
+		s.v.ConfusionMatrix = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.ConfusionMatrix
+
+	tmp[key] = value
+
+	s.v.ConfusionMatrix = tmp
+	return s
+}
+
+// Precision of predictions (per-class and average).
+func (s *_dataframeEvaluationOutlierDetectionMetrics) Precision(precision map[string]json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	s.v.Precision = precision
+	return s
+}
+
+func (s *_dataframeEvaluationOutlierDetectionMetrics) AddPrecision(key string, value json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.Precision == nil {
+		s.v.Precision = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.Precision
+
+	tmp[key] = value
+
+	s.v.Precision = tmp
+	return s
+}
+
+// Recall of predictions (per-class and average).
+func (s *_dataframeEvaluationOutlierDetectionMetrics) Recall(recall map[string]json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	s.v.Recall = recall
+	return s
+}
+
+func (s *_dataframeEvaluationOutlierDetectionMetrics) AddRecall(key string, value json.RawMessage) *_dataframeEvaluationOutlierDetectionMetrics {
+
+	var tmp map[string]json.RawMessage
+	if s.v.Recall == nil {
+		s.v.Recall = make(map[string]json.RawMessage)
+	}
+
+	tmp = s.v.Recall
+
+	tmp[key] = value
+
+	s.v.Recall = tmp
+	return s
+}
+
+func (s *_dataframeEvaluationOutlierDetectionMetrics) DataframeEvaluationOutlierDetectionMetricsCaster() *types.DataframeEvaluationOutlierDetectionMetrics {
+	return s.v
+}
diff --git a/typedapi/esdsl/dataframeevaluationregression.go b/typedapi/esdsl/dataframeevaluationregression.go
new file mode 100644
index 0000000000..85efd15f64
--- /dev/null
+++ b/typedapi/esdsl/dataframeevaluationregression.go
@@ -0,0 +1,75 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationRegression struct { + v *types.DataframeEvaluationRegression +} + +// Regression evaluation evaluates the results of a regression analysis which +// outputs a prediction of values. +func NewDataframeEvaluationRegression() *_dataframeEvaluationRegression { + + return &_dataframeEvaluationRegression{v: types.NewDataframeEvaluationRegression()} + +} + +// The field of the index which contains the ground truth. The data type of this +// field must be numerical. +func (s *_dataframeEvaluationRegression) ActualField(field string) *_dataframeEvaluationRegression { + + s.v.ActualField = field + + return s +} + +// Specifies the metrics that are used for the evaluation. For more information +// on mse, msle, and huber, consult the Jupyter notebook on regression loss +// functions. +func (s *_dataframeEvaluationRegression) Metrics(metrics types.DataframeEvaluationRegressionMetricsVariant) *_dataframeEvaluationRegression { + + s.v.Metrics = metrics.DataframeEvaluationRegressionMetricsCaster() + + return s +} + +// The field in the index that contains the predicted value, in other words the +// results of the regression analysis. 
+func (s *_dataframeEvaluationRegression) PredictedField(field string) *_dataframeEvaluationRegression { + + s.v.PredictedField = field + + return s +} + +func (s *_dataframeEvaluationRegression) DataframeEvaluationContainerCaster() *types.DataframeEvaluationContainer { + container := types.NewDataframeEvaluationContainer() + + container.Regression = s.v + + return container +} + +func (s *_dataframeEvaluationRegression) DataframeEvaluationRegressionCaster() *types.DataframeEvaluationRegression { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationregressionmetrics.go b/typedapi/esdsl/dataframeevaluationregressionmetrics.go new file mode 100644 index 0000000000..bed37bf7a6 --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationregressionmetrics.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _dataframeEvaluationRegressionMetrics struct { + v *types.DataframeEvaluationRegressionMetrics +} + +func NewDataframeEvaluationRegressionMetrics() *_dataframeEvaluationRegressionMetrics { + + return &_dataframeEvaluationRegressionMetrics{v: types.NewDataframeEvaluationRegressionMetrics()} + +} + +// Pseudo Huber loss function. +func (s *_dataframeEvaluationRegressionMetrics) Huber(huber types.DataframeEvaluationRegressionMetricsHuberVariant) *_dataframeEvaluationRegressionMetrics { + + s.v.Huber = huber.DataframeEvaluationRegressionMetricsHuberCaster() + + return s +} + +// Average squared difference between the predicted values and the actual +// (ground truth) value. For more information, read this wiki article. +func (s *_dataframeEvaluationRegressionMetrics) Mse(mse map[string]json.RawMessage) *_dataframeEvaluationRegressionMetrics { + + s.v.Mse = mse + return s +} + +func (s *_dataframeEvaluationRegressionMetrics) AddMse(key string, value json.RawMessage) *_dataframeEvaluationRegressionMetrics { + + var tmp map[string]json.RawMessage + if s.v.Mse == nil { + s.v.Mse = make(map[string]json.RawMessage) + } else { + tmp = s.v.Mse + } + + tmp[key] = value + + s.v.Mse = tmp + return s +} + +// Average squared difference between the logarithm of the predicted values and +// the logarithm of the actual (ground truth) value. +func (s *_dataframeEvaluationRegressionMetrics) Msle(msle types.DataframeEvaluationRegressionMetricsMsleVariant) *_dataframeEvaluationRegressionMetrics { + + s.v.Msle = msle.DataframeEvaluationRegressionMetricsMsleCaster() + + return s +} + +// Proportion of the variance in the dependent variable that is predictable from +// the independent variables. 
+func (s *_dataframeEvaluationRegressionMetrics) RSquared(rsquared map[string]json.RawMessage) *_dataframeEvaluationRegressionMetrics { + + s.v.RSquared = rsquared + return s +} + +func (s *_dataframeEvaluationRegressionMetrics) AddRSquared(key string, value json.RawMessage) *_dataframeEvaluationRegressionMetrics { + + var tmp map[string]json.RawMessage + if s.v.RSquared == nil { + s.v.RSquared = make(map[string]json.RawMessage) + } else { + tmp = s.v.RSquared + } + + tmp[key] = value + + s.v.RSquared = tmp + return s +} + +func (s *_dataframeEvaluationRegressionMetrics) DataframeEvaluationRegressionMetricsCaster() *types.DataframeEvaluationRegressionMetrics { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationregressionmetricshuber.go b/typedapi/esdsl/dataframeevaluationregressionmetricshuber.go new file mode 100644 index 0000000000..3737ca7ed9 --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationregressionmetricshuber.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationRegressionMetricsHuber struct { + v *types.DataframeEvaluationRegressionMetricsHuber +} + +func NewDataframeEvaluationRegressionMetricsHuber() *_dataframeEvaluationRegressionMetricsHuber { + + return &_dataframeEvaluationRegressionMetricsHuber{v: types.NewDataframeEvaluationRegressionMetricsHuber()} + +} + +// Approximates 1/2 (prediction - actual)2 for values much less than delta and +// approximates a straight line with slope delta for values much larger than +// delta. Defaults to 1. Delta needs to be greater than 0. +func (s *_dataframeEvaluationRegressionMetricsHuber) Delta(delta types.Float64) *_dataframeEvaluationRegressionMetricsHuber { + + s.v.Delta = &delta + + return s +} + +func (s *_dataframeEvaluationRegressionMetricsHuber) DataframeEvaluationRegressionMetricsHuberCaster() *types.DataframeEvaluationRegressionMetricsHuber { + return s.v +} diff --git a/typedapi/esdsl/dataframeevaluationregressionmetricsmsle.go b/typedapi/esdsl/dataframeevaluationregressionmetricsmsle.go new file mode 100644 index 0000000000..c25d6a920f --- /dev/null +++ b/typedapi/esdsl/dataframeevaluationregressionmetricsmsle.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframeEvaluationRegressionMetricsMsle struct { + v *types.DataframeEvaluationRegressionMetricsMsle +} + +func NewDataframeEvaluationRegressionMetricsMsle() *_dataframeEvaluationRegressionMetricsMsle { + + return &_dataframeEvaluationRegressionMetricsMsle{v: types.NewDataframeEvaluationRegressionMetricsMsle()} + +} + +// Defines the transition point at which you switch from minimizing quadratic +// error to minimizing quadratic log error. Defaults to 1. +func (s *_dataframeEvaluationRegressionMetricsMsle) Offset(offset types.Float64) *_dataframeEvaluationRegressionMetricsMsle { + + s.v.Offset = &offset + + return s +} + +func (s *_dataframeEvaluationRegressionMetricsMsle) DataframeEvaluationRegressionMetricsMsleCaster() *types.DataframeEvaluationRegressionMetricsMsle { + return s.v +} diff --git a/typedapi/esdsl/dataframepreviewconfig.go b/typedapi/esdsl/dataframepreviewconfig.go new file mode 100644 index 0000000000..d32ebee7c9 --- /dev/null +++ b/typedapi/esdsl/dataframepreviewconfig.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataframePreviewConfig struct { + v *types.DataframePreviewConfig +} + +func NewDataframePreviewConfig(analysis types.DataframeAnalysisContainerVariant, source types.DataframeAnalyticsSourceVariant) *_dataframePreviewConfig { + + tmp := &_dataframePreviewConfig{v: types.NewDataframePreviewConfig()} + + tmp.Analysis(analysis) + + tmp.Source(source) + + return tmp + +} + +func (s *_dataframePreviewConfig) Analysis(analysis types.DataframeAnalysisContainerVariant) *_dataframePreviewConfig { + + s.v.Analysis = *analysis.DataframeAnalysisContainerCaster() + + return s +} + +func (s *_dataframePreviewConfig) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *_dataframePreviewConfig { + + s.v.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() + + return s +} + +func (s *_dataframePreviewConfig) MaxNumThreads(maxnumthreads int) *_dataframePreviewConfig { + + s.v.MaxNumThreads = &maxnumthreads + + return s +} + +func (s *_dataframePreviewConfig) ModelMemoryLimit(modelmemorylimit string) *_dataframePreviewConfig { + + s.v.ModelMemoryLimit = &modelmemorylimit + + return s +} + +func (s 
*_dataframePreviewConfig) Source(source types.DataframeAnalyticsSourceVariant) *_dataframePreviewConfig { + + s.v.Source = *source.DataframeAnalyticsSourceCaster() + + return s +} + +func (s *_dataframePreviewConfig) DataframePreviewConfigCaster() *types.DataframePreviewConfig { + return s.v +} diff --git a/typedapi/esdsl/datastreamlifecycle.go b/typedapi/esdsl/datastreamlifecycle.go new file mode 100644 index 0000000000..9631bcab6a --- /dev/null +++ b/typedapi/esdsl/datastreamlifecycle.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamLifecycle struct { + v *types.DataStreamLifecycle +} + +func NewDataStreamLifecycle() *_dataStreamLifecycle { + + return &_dataStreamLifecycle{v: types.NewDataStreamLifecycle()} + +} + +// If defined, every document added to this data stream will be stored at least +// for this time frame. +// Any time after this duration the document could be deleted. 
+// When empty, every document in this data stream will be stored indefinitely. +func (s *_dataStreamLifecycle) DataRetention(duration types.DurationVariant) *_dataStreamLifecycle { + + s.v.DataRetention = *duration.DurationCaster() + + return s +} + +// The downsampling configuration to execute for the managed backing index after +// rollover. +func (s *_dataStreamLifecycle) Downsampling(downsampling types.DataStreamLifecycleDownsamplingVariant) *_dataStreamLifecycle { + + s.v.Downsampling = downsampling.DataStreamLifecycleDownsamplingCaster() + + return s +} + +// If defined, it turns data stream lifecycle on/off (`true`/`false`) for this +// data stream. A data stream lifecycle +// that's disabled (enabled: `false`) will have no effect on the data stream. +func (s *_dataStreamLifecycle) Enabled(enabled bool) *_dataStreamLifecycle { + + s.v.Enabled = &enabled + + return s +} + +func (s *_dataStreamLifecycle) DataStreamLifecycleCaster() *types.DataStreamLifecycle { + return s.v +} diff --git a/typedapi/esdsl/datastreamlifecycledownsampling.go b/typedapi/esdsl/datastreamlifecycledownsampling.go new file mode 100644 index 0000000000..49a4b7d792 --- /dev/null +++ b/typedapi/esdsl/datastreamlifecycledownsampling.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamLifecycleDownsampling struct { + v *types.DataStreamLifecycleDownsampling +} + +func NewDataStreamLifecycleDownsampling() *_dataStreamLifecycleDownsampling { + + return &_dataStreamLifecycleDownsampling{v: types.NewDataStreamLifecycleDownsampling()} + +} + +// The list of downsampling rounds to execute as part of this downsampling +// configuration +func (s *_dataStreamLifecycleDownsampling) Rounds(rounds ...types.DownsamplingRoundVariant) *_dataStreamLifecycleDownsampling { + + for _, v := range rounds { + + s.v.Rounds = append(s.v.Rounds, *v.DownsamplingRoundCaster()) + + } + return s +} + +func (s *_dataStreamLifecycleDownsampling) DataStreamLifecycleDownsamplingCaster() *types.DataStreamLifecycleDownsampling { + return s.v +} diff --git a/typedapi/esdsl/datastreamlifecyclerolloverconditions.go b/typedapi/esdsl/datastreamlifecyclerolloverconditions.go new file mode 100644 index 0000000000..eeae396784 --- /dev/null +++ b/typedapi/esdsl/datastreamlifecyclerolloverconditions.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamLifecycleRolloverConditions struct { + v *types.DataStreamLifecycleRolloverConditions +} + +func NewDataStreamLifecycleRolloverConditions() *_dataStreamLifecycleRolloverConditions { + + return &_dataStreamLifecycleRolloverConditions{v: types.NewDataStreamLifecycleRolloverConditions()} + +} + +func (s *_dataStreamLifecycleRolloverConditions) MaxAge(maxage string) *_dataStreamLifecycleRolloverConditions { + + s.v.MaxAge = &maxage + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MaxDocs(maxdocs int64) *_dataStreamLifecycleRolloverConditions { + + s.v.MaxDocs = &maxdocs + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MaxPrimaryShardDocs(maxprimarysharddocs int64) *_dataStreamLifecycleRolloverConditions { + + s.v.MaxPrimaryShardDocs = &maxprimarysharddocs + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MaxPrimaryShardSize(bytesize types.ByteSizeVariant) *_dataStreamLifecycleRolloverConditions { + + s.v.MaxPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MaxSize(bytesize types.ByteSizeVariant) *_dataStreamLifecycleRolloverConditions { + + s.v.MaxSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) 
MinAge(duration types.DurationVariant) *_dataStreamLifecycleRolloverConditions { + + s.v.MinAge = *duration.DurationCaster() + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MinDocs(mindocs int64) *_dataStreamLifecycleRolloverConditions { + + s.v.MinDocs = &mindocs + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MinPrimaryShardDocs(minprimarysharddocs int64) *_dataStreamLifecycleRolloverConditions { + + s.v.MinPrimaryShardDocs = &minprimarysharddocs + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MinPrimaryShardSize(bytesize types.ByteSizeVariant) *_dataStreamLifecycleRolloverConditions { + + s.v.MinPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) MinSize(bytesize types.ByteSizeVariant) *_dataStreamLifecycleRolloverConditions { + + s.v.MinSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_dataStreamLifecycleRolloverConditions) DataStreamLifecycleRolloverConditionsCaster() *types.DataStreamLifecycleRolloverConditions { + return s.v +} diff --git a/typedapi/esdsl/datastreamlifecyclewithrollover.go b/typedapi/esdsl/datastreamlifecyclewithrollover.go new file mode 100644 index 0000000000..660995c4e5 --- /dev/null +++ b/typedapi/esdsl/datastreamlifecyclewithrollover.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamLifecycleWithRollover struct { + v *types.DataStreamLifecycleWithRollover +} + +func NewDataStreamLifecycleWithRollover() *_dataStreamLifecycleWithRollover { + + return &_dataStreamLifecycleWithRollover{v: types.NewDataStreamLifecycleWithRollover()} + +} + +// If defined, every document added to this data stream will be stored at least +// for this time frame. +// Any time after this duration the document could be deleted. +// When empty, every document in this data stream will be stored indefinitely. +func (s *_dataStreamLifecycleWithRollover) DataRetention(duration types.DurationVariant) *_dataStreamLifecycleWithRollover { + + s.v.DataRetention = *duration.DurationCaster() + + return s +} + +// The downsampling configuration to execute for the managed backing index after +// rollover. +func (s *_dataStreamLifecycleWithRollover) Downsampling(downsampling types.DataStreamLifecycleDownsamplingVariant) *_dataStreamLifecycleWithRollover { + + s.v.Downsampling = downsampling.DataStreamLifecycleDownsamplingCaster() + + return s +} + +// If defined, it turns data stream lifecycle on/off (`true`/`false`) for this +// data stream. A data stream lifecycle +// that's disabled (enabled: `false`) will have no effect on the data stream. 
+func (s *_dataStreamLifecycleWithRollover) Enabled(enabled bool) *_dataStreamLifecycleWithRollover { + + s.v.Enabled = &enabled + + return s +} + +// The conditions which will trigger the rollover of a backing index as +// configured by the cluster setting `cluster.lifecycle.default.rollover`. +// This property is an implementation detail and it will only be retrieved when +// the query param `include_defaults` is set to true. +// The contents of this field are subject to change. +func (s *_dataStreamLifecycleWithRollover) Rollover(rollover types.DataStreamLifecycleRolloverConditionsVariant) *_dataStreamLifecycleWithRollover { + + s.v.Rollover = rollover.DataStreamLifecycleRolloverConditionsCaster() + + return s +} + +func (s *_dataStreamLifecycleWithRollover) DataStreamLifecycleWithRolloverCaster() *types.DataStreamLifecycleWithRollover { + return s.v +} diff --git a/typedapi/esdsl/datastreamtimestamp.go b/typedapi/esdsl/datastreamtimestamp.go new file mode 100644 index 0000000000..81af707a6a --- /dev/null +++ b/typedapi/esdsl/datastreamtimestamp.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamTimestamp struct { + v *types.DataStreamTimestamp +} + +func NewDataStreamTimestamp(enabled bool) *_dataStreamTimestamp { + + tmp := &_dataStreamTimestamp{v: types.NewDataStreamTimestamp()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_dataStreamTimestamp) Enabled(enabled bool) *_dataStreamTimestamp { + + s.v.Enabled = enabled + + return s +} + +func (s *_dataStreamTimestamp) DataStreamTimestampCaster() *types.DataStreamTimestamp { + return s.v +} diff --git a/typedapi/esdsl/datastreamvisibility.go b/typedapi/esdsl/datastreamvisibility.go new file mode 100644 index 0000000000..849c55d7cb --- /dev/null +++ b/typedapi/esdsl/datastreamvisibility.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dataStreamVisibility struct { + v *types.DataStreamVisibility +} + +func NewDataStreamVisibility() *_dataStreamVisibility { + + return &_dataStreamVisibility{v: types.NewDataStreamVisibility()} + +} + +func (s *_dataStreamVisibility) AllowCustomRouting(allowcustomrouting bool) *_dataStreamVisibility { + + s.v.AllowCustomRouting = &allowcustomrouting + + return s +} + +func (s *_dataStreamVisibility) Hidden(hidden bool) *_dataStreamVisibility { + + s.v.Hidden = &hidden + + return s +} + +func (s *_dataStreamVisibility) DataStreamVisibilityCaster() *types.DataStreamVisibility { + return s.v +} diff --git a/typedapi/esdsl/datedecayfunction.go b/typedapi/esdsl/datedecayfunction.go new file mode 100644 index 0000000000..4bb780131a --- /dev/null +++ b/typedapi/esdsl/datedecayfunction.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" +) + +type _dateDecayFunction struct { + v *types.DateDecayFunction +} + +// Function that scores a document with a linear decay, depending on the +// distance of a numeric field value of the document from an origin. +func NewDateDecayFunction() *_dateDecayFunction { + + return &_dateDecayFunction{v: types.NewDateDecayFunction()} + +} + +func (s *_dateDecayFunction) DecayFunctionBaseDateMathDuration(decayfunctionbasedatemathduration map[string]types.DecayPlacementDateMathDuration) *_dateDecayFunction { + + s.v.DecayFunctionBaseDateMathDuration = decayfunctionbasedatemathduration + return s +} + +func (s *_dateDecayFunction) AddDecayFunctionBaseDateMathDuration(key string, value types.DecayPlacementDateMathDurationVariant) *_dateDecayFunction { + + var tmp map[string]types.DecayPlacementDateMathDuration + if s.v.DecayFunctionBaseDateMathDuration == nil { + s.v.DecayFunctionBaseDateMathDuration = make(map[string]types.DecayPlacementDateMathDuration) + } else { + tmp = s.v.DecayFunctionBaseDateMathDuration + } + + tmp[key] = *value.DecayPlacementDateMathDurationCaster() + + s.v.DecayFunctionBaseDateMathDuration = tmp + return s +} + +// Determines how the distance is calculated when a field used for computing the +// decay contains multiple values. 
+func (s *_dateDecayFunction) MultiValueMode(multivaluemode multivaluemode.MultiValueMode) *_dateDecayFunction { + + s.v.MultiValueMode = &multivaluemode + return s +} + +func (s *_dateDecayFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.Linear = s.v + + return container +} + +func (s *_dateDecayFunction) DateDecayFunctionCaster() *types.DateDecayFunction { + return s.v +} diff --git a/typedapi/esdsl/datedistancefeaturequery.go b/typedapi/esdsl/datedistancefeaturequery.go new file mode 100644 index 0000000000..2b23fda26e --- /dev/null +++ b/typedapi/esdsl/datedistancefeaturequery.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateDistanceFeatureQuery struct { + v *types.DateDistanceFeatureQuery +} + +// Boosts the relevance score of documents closer to a provided origin date or +// point. 
+// For example, you can use this query to give more weight to documents closer +// to a certain date or location. +func NewDateDistanceFeatureQuery() *_dateDistanceFeatureQuery { + + return &_dateDistanceFeatureQuery{v: types.NewDateDistanceFeatureQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_dateDistanceFeatureQuery) Boost(boost float32) *_dateDistanceFeatureQuery { + + s.v.Boost = &boost + + return s +} + +// Name of the field used to calculate distances. This field must meet the +// following criteria: +// be a `date`, `date_nanos` or `geo_point` field; +// have an `index` mapping parameter value of `true`, which is the default; +// have an `doc_values` mapping parameter value of `true`, which is the default. +func (s *_dateDistanceFeatureQuery) Field(field string) *_dateDistanceFeatureQuery { + + s.v.Field = field + + return s +} + +// Date or point of origin used to calculate distances. +// If the `field` value is a `date` or `date_nanos` field, the `origin` value +// must be a date. +// Date Math, such as `now-1h`, is supported. +// If the field value is a `geo_point` field, the `origin` value must be a +// geopoint. +func (s *_dateDistanceFeatureQuery) Origin(datemath string) *_dateDistanceFeatureQuery { + + s.v.Origin = datemath + + return s +} + +// Distance from the `origin` at which relevance scores receive half of the +// `boost` value. +// If the `field` value is a `date` or `date_nanos` field, the `pivot` value +// must be a time unit, such as `1h` or `10d`. If the `field` value is a +// `geo_point` field, the `pivot` value must be a distance unit, such as `1km` +// or `12m`. 
+func (s *_dateDistanceFeatureQuery) Pivot(duration types.DurationVariant) *_dateDistanceFeatureQuery { + + s.v.Pivot = *duration.DurationCaster() + + return s +} + +func (s *_dateDistanceFeatureQuery) QueryName_(queryname_ string) *_dateDistanceFeatureQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_dateDistanceFeatureQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.DistanceFeature = s.v + + return container +} + +func (s *_dateDistanceFeatureQuery) DateDistanceFeatureQueryCaster() *types.DateDistanceFeatureQuery { + return s.v +} diff --git a/typedapi/esdsl/datehistogramaggregation.go b/typedapi/esdsl/datehistogramaggregation.go new file mode 100644 index 0000000000..d9ed5f28cb --- /dev/null +++ b/typedapi/esdsl/datehistogramaggregation.go @@ -0,0 +1,202 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval" +) + +type _dateHistogramAggregation struct { + v *types.DateHistogramAggregation +} + +// A multi-bucket values source based aggregation that can be applied on date +// values or date range values extracted from the documents. +// It dynamically builds fixed size (interval) buckets over the values. +func NewDateHistogramAggregation() *_dateHistogramAggregation { + + return &_dateHistogramAggregation{v: types.NewDateHistogramAggregation()} + +} + +// Calendar-aware interval. +// Can be specified using the unit name, such as `month`, or as a single unit +// quantity, such as `1M`. +func (s *_dateHistogramAggregation) CalendarInterval(calendarinterval calendarinterval.CalendarInterval) *_dateHistogramAggregation { + + s.v.CalendarInterval = &calendarinterval + return s +} + +// Enables extending the bounds of the histogram beyond the data itself. +func (s *_dateHistogramAggregation) ExtendedBounds(extendedbounds types.ExtendedBoundsFieldDateMathVariant) *_dateHistogramAggregation { + + s.v.ExtendedBounds = extendedbounds.ExtendedBoundsFieldDateMathCaster() + + return s +} + +// The date field whose values are use to build a histogram. +func (s *_dateHistogramAggregation) Field(field string) *_dateHistogramAggregation { + + s.v.Field = &field + + return s +} + +// Fixed intervals: a fixed number of SI units and never deviate, regardless of +// where they fall on the calendar. +func (s *_dateHistogramAggregation) FixedInterval(duration types.DurationVariant) *_dateHistogramAggregation { + + s.v.FixedInterval = *duration.DurationCaster() + + return s +} + +// The date format used to format `key_as_string` in the response. 
+// If no `format` is specified, the first date format specified in the field +// mapping is used. +func (s *_dateHistogramAggregation) Format(format string) *_dateHistogramAggregation { + + s.v.Format = &format + + return s +} + +// Limits the histogram to specified bounds. +func (s *_dateHistogramAggregation) HardBounds(hardbounds types.ExtendedBoundsFieldDateMathVariant) *_dateHistogramAggregation { + + s.v.HardBounds = hardbounds.ExtendedBoundsFieldDateMathCaster() + + return s +} + +func (s *_dateHistogramAggregation) Interval(duration types.DurationVariant) *_dateHistogramAggregation { + + s.v.Interval = *duration.DurationCaster() + + return s +} + +// Set to `true` to associate a unique string key with each bucket and return +// the ranges as a hash rather than an array. +func (s *_dateHistogramAggregation) Keyed(keyed bool) *_dateHistogramAggregation { + + s.v.Keyed = &keyed + + return s +} + +// Only returns buckets that have `min_doc_count` number of documents. +// By default, all buckets between the first bucket that matches documents and +// the last one are returned. +func (s *_dateHistogramAggregation) MinDocCount(mindoccount int) *_dateHistogramAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_dateHistogramAggregation) Missing(datetime types.DateTimeVariant) *_dateHistogramAggregation { + + s.v.Missing = *datetime.DateTimeCaster() + + return s +} + +// Changes the start value of each bucket by the specified positive (`+`) or +// negative offset (`-`) duration. +func (s *_dateHistogramAggregation) Offset(duration types.DurationVariant) *_dateHistogramAggregation { + + s.v.Offset = *duration.DurationCaster() + + return s +} + +// The sort order of the returned buckets. 
+func (s *_dateHistogramAggregation) Order(aggregateorder types.AggregateOrderVariant) *_dateHistogramAggregation { + + s.v.Order = *aggregateorder.AggregateOrderCaster() + + return s +} + +func (s *_dateHistogramAggregation) Params(params map[string]json.RawMessage) *_dateHistogramAggregation { + + s.v.Params = params + return s +} + +func (s *_dateHistogramAggregation) AddParam(key string, value json.RawMessage) *_dateHistogramAggregation { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_dateHistogramAggregation) Script(script types.ScriptVariant) *_dateHistogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Time zone used for bucketing and rounding. +// Defaults to Coordinated Universal Time (UTC). +func (s *_dateHistogramAggregation) TimeZone(timezone string) *_dateHistogramAggregation { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_dateHistogramAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.DateHistogram = s.v + + return container +} + +func (s *_dateHistogramAggregation) PivotGroupByContainerCaster() *types.PivotGroupByContainer { + container := types.NewPivotGroupByContainer() + + container.DateHistogram = s.v + + return container +} + +func (s *_dateHistogramAggregation) DateHistogramAggregationCaster() *types.DateHistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/datehistogramgrouping.go b/typedapi/esdsl/datehistogramgrouping.go new file mode 100644 index 0000000000..1264662872 --- /dev/null +++ b/typedapi/esdsl/datehistogramgrouping.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateHistogramGrouping struct { + v *types.DateHistogramGrouping +} + +func NewDateHistogramGrouping() *_dateHistogramGrouping { + + return &_dateHistogramGrouping{v: types.NewDateHistogramGrouping()} + +} + +// The interval of time buckets to be generated when rolling up. +func (s *_dateHistogramGrouping) CalendarInterval(duration types.DurationVariant) *_dateHistogramGrouping { + + s.v.CalendarInterval = *duration.DurationCaster() + + return s +} + +// How long to wait before rolling up new documents. +// By default, the indexer attempts to roll up all data that is available. +// However, it is not uncommon for data to arrive out of order. +// The indexer is unable to deal with data that arrives after a time-span has +// been rolled up. +// You need to specify a delay that matches the longest period of time you +// expect out-of-order data to arrive. +func (s *_dateHistogramGrouping) Delay(duration types.DurationVariant) *_dateHistogramGrouping { + + s.v.Delay = *duration.DurationCaster() + + return s +} + +// The date field that is to be rolled up. 
+func (s *_dateHistogramGrouping) Field(field string) *_dateHistogramGrouping { + + s.v.Field = field + + return s +} + +// The interval of time buckets to be generated when rolling up. +func (s *_dateHistogramGrouping) FixedInterval(duration types.DurationVariant) *_dateHistogramGrouping { + + s.v.FixedInterval = *duration.DurationCaster() + + return s +} + +func (s *_dateHistogramGrouping) Format(format string) *_dateHistogramGrouping { + + s.v.Format = &format + + return s +} + +func (s *_dateHistogramGrouping) Interval(duration types.DurationVariant) *_dateHistogramGrouping { + + s.v.Interval = *duration.DurationCaster() + + return s +} + +// Defines what `time_zone` the rollup documents are stored as. +// Unlike raw data, which can shift timezones on the fly, rolled documents have +// to be stored with a specific timezone. +// By default, rollup documents are stored in `UTC`. +func (s *_dateHistogramGrouping) TimeZone(timezone string) *_dateHistogramGrouping { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_dateHistogramGrouping) DateHistogramGroupingCaster() *types.DateHistogramGrouping { + return s.v +} diff --git a/typedapi/esdsl/dateindexnameprocessor.go b/typedapi/esdsl/dateindexnameprocessor.go new file mode 100644 index 0000000000..7d1359578e --- /dev/null +++ b/typedapi/esdsl/dateindexnameprocessor.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateIndexNameProcessor struct { + v *types.DateIndexNameProcessor +} + +// The purpose of this processor is to point documents to the right time based +// index based on a date or timestamp field in a document by using the date math +// index name support. +func NewDateIndexNameProcessor(daterounding string) *_dateIndexNameProcessor { + + tmp := &_dateIndexNameProcessor{v: types.NewDateIndexNameProcessor()} + + tmp.DateRounding(daterounding) + + return tmp + +} + +// An array of the expected date formats for parsing dates / timestamps in the +// document being preprocessed. +// Can be a java time pattern or one of the following formats: ISO8601, UNIX, +// UNIX_MS, or TAI64N. +func (s *_dateIndexNameProcessor) DateFormats(dateformats ...string) *_dateIndexNameProcessor { + + for _, v := range dateformats { + + s.v.DateFormats = append(s.v.DateFormats, v) + + } + return s +} + +// How to round the date when formatting the date into the index name. Valid +// values are: +// `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and +// `s` (second). +// Supports template snippets. +func (s *_dateIndexNameProcessor) DateRounding(daterounding string) *_dateIndexNameProcessor { + + s.v.DateRounding = daterounding + + return s +} + +// Description of the processor. 
+// Useful for describing the purpose of the processor or its configuration. +func (s *_dateIndexNameProcessor) Description(description string) *_dateIndexNameProcessor { + + s.v.Description = &description + + return s +} + +// The field to get the date or timestamp from. +func (s *_dateIndexNameProcessor) Field(field string) *_dateIndexNameProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_dateIndexNameProcessor) If(if_ types.ScriptVariant) *_dateIndexNameProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_dateIndexNameProcessor) IgnoreFailure(ignorefailure bool) *_dateIndexNameProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// The format to be used when printing the parsed date into the index name. +// A valid java time pattern is expected here. +// Supports template snippets. +func (s *_dateIndexNameProcessor) IndexNameFormat(indexnameformat string) *_dateIndexNameProcessor { + + s.v.IndexNameFormat = &indexnameformat + + return s +} + +// A prefix of the index name to be prepended before the printed date. +// Supports template snippets. +func (s *_dateIndexNameProcessor) IndexNamePrefix(indexnameprefix string) *_dateIndexNameProcessor { + + s.v.IndexNamePrefix = &indexnameprefix + + return s +} + +// The locale to use when parsing the date from the document being preprocessed, +// relevant when parsing month names or week days. +func (s *_dateIndexNameProcessor) Locale(locale string) *_dateIndexNameProcessor { + + s.v.Locale = &locale + + return s +} + +// Handle failures for the processor. +func (s *_dateIndexNameProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_dateIndexNameProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_dateIndexNameProcessor) Tag(tag string) *_dateIndexNameProcessor { + + s.v.Tag = &tag + + return s +} + +// The timezone to use when parsing the date and when date math index supports +// resolves expressions into concrete index names. +func (s *_dateIndexNameProcessor) Timezone(timezone string) *_dateIndexNameProcessor { + + s.v.Timezone = &timezone + + return s +} + +func (s *_dateIndexNameProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.DateIndexName = s.v + + return container +} + +func (s *_dateIndexNameProcessor) DateIndexNameProcessorCaster() *types.DateIndexNameProcessor { + return s.v +} diff --git a/typedapi/esdsl/datenanosproperty.go b/typedapi/esdsl/datenanosproperty.go new file mode 100644 index 0000000000..8e30795c5e --- /dev/null +++ b/typedapi/esdsl/datenanosproperty.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _dateNanosProperty struct { + v *types.DateNanosProperty +} + +func NewDateNanosProperty() *_dateNanosProperty { + + return &_dateNanosProperty{v: types.NewDateNanosProperty()} + +} + +func (s *_dateNanosProperty) Boost(boost types.Float64) *_dateNanosProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_dateNanosProperty) CopyTo(fields ...string) *_dateNanosProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_dateNanosProperty) DocValues(docvalues bool) *_dateNanosProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_dateNanosProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_dateNanosProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_dateNanosProperty) Fields(fields map[string]types.Property) *_dateNanosProperty { + + s.v.Fields = fields + return s +} + +func (s *_dateNanosProperty) AddField(key string, value types.PropertyVariant) *_dateNanosProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_dateNanosProperty) Format(format string) *_dateNanosProperty { + + s.v.Format = &format + + return s +} + +func (s *_dateNanosProperty) IgnoreAbove(ignoreabove int) *_dateNanosProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_dateNanosProperty) IgnoreMalformed(ignoremalformed bool) *_dateNanosProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + 
+func (s *_dateNanosProperty) Index(index bool) *_dateNanosProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_dateNanosProperty) Meta(meta map[string]string) *_dateNanosProperty { + + s.v.Meta = meta + return s +} + +func (s *_dateNanosProperty) AddMeta(key string, value string) *_dateNanosProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_dateNanosProperty) NullValue(datetime types.DateTimeVariant) *_dateNanosProperty { + + s.v.NullValue = *datetime.DateTimeCaster() + + return s +} + +func (s *_dateNanosProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_dateNanosProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_dateNanosProperty) PrecisionStep(precisionstep int) *_dateNanosProperty { + + s.v.PrecisionStep = &precisionstep + + return s +} + +func (s *_dateNanosProperty) Properties(properties map[string]types.Property) *_dateNanosProperty { + + s.v.Properties = properties + return s +} + +func (s *_dateNanosProperty) AddProperty(key string, value types.PropertyVariant) *_dateNanosProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_dateNanosProperty) Script(script types.ScriptVariant) *_dateNanosProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_dateNanosProperty) Store(store bool) *_dateNanosProperty { + + s.v.Store = &store + + return s +} + +func (s *_dateNanosProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_dateNanosProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_dateNanosProperty) DynamicTemplateCaster() 
*types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_dateNanosProperty) DateNanosPropertyCaster() *types.DateNanosProperty { + return s.v +} diff --git a/typedapi/esdsl/dateprocessor.go b/typedapi/esdsl/dateprocessor.go new file mode 100644 index 0000000000..d25a192d63 --- /dev/null +++ b/typedapi/esdsl/dateprocessor.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateProcessor struct { + v *types.DateProcessor +} + +// Parses dates from fields, and then uses the date or timestamp as the +// timestamp for the document. +func NewDateProcessor() *_dateProcessor { + + return &_dateProcessor{v: types.NewDateProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. 
+func (s *_dateProcessor) Description(description string) *_dateProcessor { + + s.v.Description = &description + + return s +} + +// The field to get the date from. +func (s *_dateProcessor) Field(field string) *_dateProcessor { + + s.v.Field = field + + return s +} + +// An array of the expected date formats. +// Can be a java time pattern or one of the following formats: ISO8601, UNIX, +// UNIX_MS, or TAI64N. +func (s *_dateProcessor) Formats(formats ...string) *_dateProcessor { + + for _, v := range formats { + + s.v.Formats = append(s.v.Formats, v) + + } + return s +} + +// Conditionally execute the processor. +func (s *_dateProcessor) If(if_ types.ScriptVariant) *_dateProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_dateProcessor) IgnoreFailure(ignorefailure bool) *_dateProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// The locale to use when parsing the date, relevant when parsing month names or +// week days. +// Supports template snippets. +func (s *_dateProcessor) Locale(locale string) *_dateProcessor { + + s.v.Locale = &locale + + return s +} + +// Handle failures for the processor. +func (s *_dateProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_dateProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// The format to use when writing the date to target_field. Must be a valid +// java time pattern. +func (s *_dateProcessor) OutputFormat(outputformat string) *_dateProcessor { + + s.v.OutputFormat = &outputformat + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_dateProcessor) Tag(tag string) *_dateProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that will hold the parsed date. 
+func (s *_dateProcessor) TargetField(field string) *_dateProcessor { + + s.v.TargetField = &field + + return s +} + +// The timezone to use when parsing the date. +// Supports template snippets. +func (s *_dateProcessor) Timezone(timezone string) *_dateProcessor { + + s.v.Timezone = &timezone + + return s +} + +func (s *_dateProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Date = s.v + + return container +} + +func (s *_dateProcessor) DateProcessorCaster() *types.DateProcessor { + return s.v +} diff --git a/typedapi/esdsl/dateproperty.go b/typedapi/esdsl/dateproperty.go new file mode 100644 index 0000000000..a20b9fc494 --- /dev/null +++ b/typedapi/esdsl/dateproperty.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _dateProperty struct { + v *types.DateProperty +} + +func NewDateProperty() *_dateProperty { + + return &_dateProperty{v: types.NewDateProperty()} + +} + +func (s *_dateProperty) Boost(boost types.Float64) *_dateProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_dateProperty) CopyTo(fields ...string) *_dateProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_dateProperty) DocValues(docvalues bool) *_dateProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_dateProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_dateProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_dateProperty) Fielddata(fielddata types.NumericFielddataVariant) *_dateProperty { + + s.v.Fielddata = fielddata.NumericFielddataCaster() + + return s +} + +func (s *_dateProperty) Fields(fields map[string]types.Property) *_dateProperty { + + s.v.Fields = fields + return s +} + +func (s *_dateProperty) AddField(key string, value types.PropertyVariant) *_dateProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_dateProperty) Format(format string) *_dateProperty { + + s.v.Format = &format + + return s +} + +func (s *_dateProperty) IgnoreAbove(ignoreabove int) *_dateProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_dateProperty) IgnoreMalformed(ignoremalformed bool) *_dateProperty { + + 
s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_dateProperty) Index(index bool) *_dateProperty { + + s.v.Index = &index + + return s +} + +func (s *_dateProperty) Locale(locale string) *_dateProperty { + + s.v.Locale = &locale + + return s +} + +// Metadata about the field. +func (s *_dateProperty) Meta(meta map[string]string) *_dateProperty { + + s.v.Meta = meta + return s +} + +func (s *_dateProperty) AddMeta(key string, value string) *_dateProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_dateProperty) NullValue(datetime types.DateTimeVariant) *_dateProperty { + + s.v.NullValue = *datetime.DateTimeCaster() + + return s +} + +func (s *_dateProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_dateProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_dateProperty) PrecisionStep(precisionstep int) *_dateProperty { + + s.v.PrecisionStep = &precisionstep + + return s +} + +func (s *_dateProperty) Properties(properties map[string]types.Property) *_dateProperty { + + s.v.Properties = properties + return s +} + +func (s *_dateProperty) AddProperty(key string, value types.PropertyVariant) *_dateProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_dateProperty) Script(script types.ScriptVariant) *_dateProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_dateProperty) Store(store bool) *_dateProperty { + + s.v.Store = &store + + return s +} + +func (s *_dateProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_dateProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func 
(s *_dateProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_dateProperty) DatePropertyCaster() *types.DateProperty { + return s.v +} diff --git a/typedapi/esdsl/daterangeaggregation.go b/typedapi/esdsl/daterangeaggregation.go new file mode 100644 index 0000000000..49638367fe --- /dev/null +++ b/typedapi/esdsl/daterangeaggregation.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateRangeAggregation struct { + v *types.DateRangeAggregation +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of date ranges - each representing a bucket. +func NewDateRangeAggregation() *_dateRangeAggregation { + + return &_dateRangeAggregation{v: types.NewDateRangeAggregation()} + +} + +// The date field whose values are use to build ranges. 
+func (s *_dateRangeAggregation) Field(field string) *_dateRangeAggregation { + + s.v.Field = &field + + return s +} + +// The date format used to format `from` and `to` in the response. +func (s *_dateRangeAggregation) Format(format string) *_dateRangeAggregation { + + s.v.Format = &format + + return s +} + +// Set to `true` to associate a unique string key with each bucket and returns +// the ranges as a hash rather than an array. +func (s *_dateRangeAggregation) Keyed(keyed bool) *_dateRangeAggregation { + + s.v.Keyed = &keyed + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_dateRangeAggregation) Missing(missing types.MissingVariant) *_dateRangeAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// Array of date ranges. +func (s *_dateRangeAggregation) Ranges(ranges ...types.DateRangeExpressionVariant) *_dateRangeAggregation { + + for _, v := range ranges { + + s.v.Ranges = append(s.v.Ranges, *v.DateRangeExpressionCaster()) + + } + return s +} + +// Time zone used to convert dates from another time zone to UTC. 
+func (s *_dateRangeAggregation) TimeZone(timezone string) *_dateRangeAggregation { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_dateRangeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.DateRange = s.v + + return container +} + +func (s *_dateRangeAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.DateRange = s.v + + return container +} + +func (s *_dateRangeAggregation) DateRangeAggregationCaster() *types.DateRangeAggregation { + return s.v +} diff --git a/typedapi/esdsl/daterangeexpression.go b/typedapi/esdsl/daterangeexpression.go new file mode 100644 index 0000000000..20f9873d31 --- /dev/null +++ b/typedapi/esdsl/daterangeexpression.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dateRangeExpression struct { + v *types.DateRangeExpression +} + +func NewDateRangeExpression() *_dateRangeExpression { + + return &_dateRangeExpression{v: types.NewDateRangeExpression()} + +} + +// Start of the range (inclusive). +func (s *_dateRangeExpression) From(fielddatemath types.FieldDateMathVariant) *_dateRangeExpression { + + s.v.From = *fielddatemath.FieldDateMathCaster() + + return s +} + +// Custom key to return the range with. +func (s *_dateRangeExpression) Key(key string) *_dateRangeExpression { + + s.v.Key = &key + + return s +} + +// End of the range (exclusive). +func (s *_dateRangeExpression) To(fielddatemath types.FieldDateMathVariant) *_dateRangeExpression { + + s.v.To = *fielddatemath.FieldDateMathCaster() + + return s +} + +func (s *_dateRangeExpression) DateRangeExpressionCaster() *types.DateRangeExpression { + return s.v +} diff --git a/typedapi/esdsl/daterangeproperty.go b/typedapi/esdsl/daterangeproperty.go new file mode 100644 index 0000000000..bfc7834900 --- /dev/null +++ b/typedapi/esdsl/daterangeproperty.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _dateRangeProperty struct { + v *types.DateRangeProperty +} + +func NewDateRangeProperty() *_dateRangeProperty { + + return &_dateRangeProperty{v: types.NewDateRangeProperty()} + +} + +func (s *_dateRangeProperty) Boost(boost types.Float64) *_dateRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_dateRangeProperty) Coerce(coerce bool) *_dateRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_dateRangeProperty) CopyTo(fields ...string) *_dateRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_dateRangeProperty) DocValues(docvalues bool) *_dateRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_dateRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_dateRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_dateRangeProperty) Fields(fields map[string]types.Property) *_dateRangeProperty { + + s.v.Fields = fields + return s +} + +func (s *_dateRangeProperty) AddField(key string, value types.PropertyVariant) *_dateRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_dateRangeProperty) Format(format string) *_dateRangeProperty { + + s.v.Format = &format + + return s +} + +func (s *_dateRangeProperty) IgnoreAbove(ignoreabove int) 
*_dateRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_dateRangeProperty) Index(index bool) *_dateRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_dateRangeProperty) Meta(meta map[string]string) *_dateRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_dateRangeProperty) AddMeta(key string, value string) *_dateRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_dateRangeProperty) Properties(properties map[string]types.Property) *_dateRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_dateRangeProperty) AddProperty(key string, value types.PropertyVariant) *_dateRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_dateRangeProperty) Store(store bool) *_dateRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_dateRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_dateRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_dateRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_dateRangeProperty) DateRangePropertyCaster() *types.DateRangeProperty { + return s.v +} diff --git a/typedapi/esdsl/daterangequery.go b/typedapi/esdsl/daterangequery.go new file mode 100644 index 0000000000..aba3ea54a8 --- /dev/null +++ b/typedapi/esdsl/daterangequery.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" +) + +type _dateRangeQuery struct { + k string + v *types.DateRangeQuery +} + +// Returns documents that contain terms within a provided range. +func NewDateRangeQuery(key string) *_dateRangeQuery { + return &_dateRangeQuery{ + k: key, + v: types.NewDateRangeQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_dateRangeQuery) Boost(boost float32) *_dateRangeQuery { + + s.v.Boost = &boost + + return s +} + +// Date format used to convert `date` values in the query. 
+func (s *_dateRangeQuery) Format(dateformat string) *_dateRangeQuery { + + s.v.Format = &dateformat + + return s +} + +func (s *_dateRangeQuery) From(from string) *_dateRangeQuery { + + s.v.From = &from + + return s +} + +// Greater than. +func (s *_dateRangeQuery) Gt(datemath string) *_dateRangeQuery { + + s.v.Gt = &datemath + + return s +} + +// Greater than or equal to. +func (s *_dateRangeQuery) Gte(datemath string) *_dateRangeQuery { + + s.v.Gte = &datemath + + return s +} + +// Less than. +func (s *_dateRangeQuery) Lt(datemath string) *_dateRangeQuery { + + s.v.Lt = &datemath + + return s +} + +// Less than or equal to. +func (s *_dateRangeQuery) Lte(datemath string) *_dateRangeQuery { + + s.v.Lte = &datemath + + return s +} + +func (s *_dateRangeQuery) QueryName_(queryname_ string) *_dateRangeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates how the range query matches values for `range` fields. +func (s *_dateRangeQuery) Relation(relation rangerelation.RangeRelation) *_dateRangeQuery { + + s.v.Relation = &relation + return s +} + +// Coordinated Universal Time (UTC) offset or IANA time zone used to convert +// `date` values in the query to UTC. 
+func (s *_dateRangeQuery) TimeZone(timezone string) *_dateRangeQuery { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_dateRangeQuery) To(to string) *_dateRangeQuery { + + s.v.To = &to + + return s +} + +func (s *_dateRangeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_dateRangeQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_dateRangeQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_dateRangeQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleDateRangeQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleDateRangeQuery() *_dateRangeQuery { + return &_dateRangeQuery{ + k: "", + v: types.NewDateRangeQuery(), + } +} + +func (s *_dateRangeQuery) DateRangeQueryCaster() *types.DateRangeQuery { + return s.v.DateRangeQueryCaster() +} diff --git a/typedapi/esdsl/datetime.go b/typedapi/esdsl/datetime.go new file mode 100644 index 0000000000..584e00e46b --- /dev/null +++ b/typedapi/esdsl/datetime.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _dateTime struct { + v types.DateTime +} + +func NewDateTime() *_dateTime { + return &_dateTime{v: nil} +} + +func (u *_dateTime) String(string string) *_dateTime { + + u.v = &string + + return u +} + +func (u *_dateTime) EpochTimeUnitMillis(epochtimeunitmillis int64) *_dateTime { + + u.v = &epochtimeunitmillis + + return u +} + +func (u *_dateTime) DateTimeCaster() *types.DateTime { + return &u.v +} diff --git a/typedapi/esdsl/decayfunction.go b/typedapi/esdsl/decayfunction.go new file mode 100644 index 0000000000..a08b0a684a --- /dev/null +++ b/typedapi/esdsl/decayfunction.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _decayFunction struct { + v types.DecayFunction +} + +func NewDecayFunction() *_decayFunction { + return &_decayFunction{v: nil} +} + +func (u *_decayFunction) UntypedDecayFunction(untypeddecayfunction types.UntypedDecayFunctionVariant) *_decayFunction { + + u.v = &untypeddecayfunction + + return u +} + +// Interface implementation for UntypedDecayFunction in DecayFunction union +func (u *_untypedDecayFunction) DecayFunctionCaster() *types.DecayFunction { + t := types.DecayFunction(u.v) + return &t +} + +func (u *_decayFunction) DateDecayFunction(datedecayfunction types.DateDecayFunctionVariant) *_decayFunction { + + u.v = &datedecayfunction + + return u +} + +// Interface implementation for DateDecayFunction in DecayFunction union +func (u *_dateDecayFunction) DecayFunctionCaster() *types.DecayFunction { + t := types.DecayFunction(u.v) + return &t +} + +func (u *_decayFunction) NumericDecayFunction(numericdecayfunction types.NumericDecayFunctionVariant) *_decayFunction { + + u.v = &numericdecayfunction + + return u +} + +// Interface implementation for NumericDecayFunction in DecayFunction union +func (u *_numericDecayFunction) DecayFunctionCaster() *types.DecayFunction { + t := types.DecayFunction(u.v) + return &t +} + 
+func (u *_decayFunction) GeoDecayFunction(geodecayfunction types.GeoDecayFunctionVariant) *_decayFunction { + + u.v = &geodecayfunction + + return u +} + +// Interface implementation for GeoDecayFunction in DecayFunction union +func (u *_geoDecayFunction) DecayFunctionCaster() *types.DecayFunction { + t := types.DecayFunction(u.v) + return &t +} + +func (u *_decayFunction) DecayFunctionCaster() *types.DecayFunction { + return &u.v +} diff --git a/typedapi/esdsl/decayplacement.go b/typedapi/esdsl/decayplacement.go new file mode 100644 index 0000000000..36e03caf8b --- /dev/null +++ b/typedapi/esdsl/decayplacement.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _decayPlacement struct { + v *types.DecayPlacement +} + +func NewDecayPlacement() *_decayPlacement { + + return &_decayPlacement{v: types.NewDecayPlacement()} + +} + +// Defines how documents are scored at the distance given at scale. 
+func (s *_decayPlacement) Decay(decay types.Float64) *_decayPlacement { + + s.v.Decay = &decay + + return s +} + +// If defined, the decay function will only compute the decay function for +// documents with a distance greater than the defined `offset`. +func (s *_decayPlacement) Offset(offset json.RawMessage) *_decayPlacement { + + s.v.Offset = offset + + return s +} + +// The point of origin used for calculating distance. Must be given as a number +// for numeric field, date for date fields and geo point for geo fields. +func (s *_decayPlacement) Origin(origin json.RawMessage) *_decayPlacement { + + s.v.Origin = origin + + return s +} + +// Defines the distance from origin + offset at which the computed score will +// equal `decay` parameter. +func (s *_decayPlacement) Scale(scale json.RawMessage) *_decayPlacement { + + s.v.Scale = scale + + return s +} + +func (s *_decayPlacement) DecayPlacementCaster() *types.DecayPlacement { + return s.v +} diff --git a/typedapi/esdsl/decayplacementdatemathduration.go b/typedapi/esdsl/decayplacementdatemathduration.go new file mode 100644 index 0000000000..5a1ff6332e --- /dev/null +++ b/typedapi/esdsl/decayplacementdatemathduration.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _decayPlacementDateMathDuration struct { + v *types.DecayPlacementDateMathDuration +} + +func NewDecayPlacementDateMathDuration() *_decayPlacementDateMathDuration { + + return &_decayPlacementDateMathDuration{v: types.NewDecayPlacementDateMathDuration()} + +} + +// Defines how documents are scored at the distance given at scale. +func (s *_decayPlacementDateMathDuration) Decay(decay types.Float64) *_decayPlacementDateMathDuration { + + s.v.Decay = &decay + + return s +} + +// If defined, the decay function will only compute the decay function for +// documents with a distance greater than the defined `offset`. +func (s *_decayPlacementDateMathDuration) Offset(duration types.DurationVariant) *_decayPlacementDateMathDuration { + + s.v.Offset = *duration.DurationCaster() + + return s +} + +// The point of origin used for calculating distance. Must be given as a number +// for numeric field, date for date fields and geo point for geo fields. +func (s *_decayPlacementDateMathDuration) Origin(datemath string) *_decayPlacementDateMathDuration { + + s.v.Origin = &datemath + + return s +} + +// Defines the distance from origin + offset at which the computed score will +// equal `decay` parameter. 
+func (s *_decayPlacementDateMathDuration) Scale(duration types.DurationVariant) *_decayPlacementDateMathDuration { + + s.v.Scale = *duration.DurationCaster() + + return s +} + +func (s *_decayPlacementDateMathDuration) DecayPlacementDateMathDurationCaster() *types.DecayPlacementDateMathDuration { + return s.v +} diff --git a/typedapi/esdsl/decayplacementdoubledouble.go b/typedapi/esdsl/decayplacementdoubledouble.go new file mode 100644 index 0000000000..9945c20419 --- /dev/null +++ b/typedapi/esdsl/decayplacementdoubledouble.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _decayPlacementdoubledouble struct { + v *types.DecayPlacementdoubledouble +} + +func NewDecayPlacementdoubledouble() *_decayPlacementdoubledouble { + + return &_decayPlacementdoubledouble{v: types.NewDecayPlacementdoubledouble()} + +} + +// Defines how documents are scored at the distance given at scale. 
+func (s *_decayPlacementdoubledouble) Decay(decay types.Float64) *_decayPlacementdoubledouble { + + s.v.Decay = &decay + + return s +} + +// If defined, the decay function will only compute the decay function for +// documents with a distance greater than the defined `offset`. +func (s *_decayPlacementdoubledouble) Offset(offset types.Float64) *_decayPlacementdoubledouble { + + s.v.Offset = &offset + + return s +} + +// The point of origin used for calculating distance. Must be given as a number +// for numeric field, date for date fields and geo point for geo fields. +func (s *_decayPlacementdoubledouble) Origin(origin types.Float64) *_decayPlacementdoubledouble { + + s.v.Origin = &origin + + return s +} + +// Defines the distance from origin + offset at which the computed score will +// equal `decay` parameter. +func (s *_decayPlacementdoubledouble) Scale(scale types.Float64) *_decayPlacementdoubledouble { + + s.v.Scale = &scale + + return s +} + +func (s *_decayPlacementdoubledouble) DecayPlacementdoubledoubleCaster() *types.DecayPlacementdoubledouble { + return s.v +} diff --git a/typedapi/esdsl/decayplacementgeolocationdistance.go b/typedapi/esdsl/decayplacementgeolocationdistance.go new file mode 100644 index 0000000000..dcdbdaebd8 --- /dev/null +++ b/typedapi/esdsl/decayplacementgeolocationdistance.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _decayPlacementGeoLocationDistance struct { + v *types.DecayPlacementGeoLocationDistance +} + +func NewDecayPlacementGeoLocationDistance() *_decayPlacementGeoLocationDistance { + + return &_decayPlacementGeoLocationDistance{v: types.NewDecayPlacementGeoLocationDistance()} + +} + +// Defines how documents are scored at the distance given at scale. +func (s *_decayPlacementGeoLocationDistance) Decay(decay types.Float64) *_decayPlacementGeoLocationDistance { + + s.v.Decay = &decay + + return s +} + +// If defined, the decay function will only compute the decay function for +// documents with a distance greater than the defined `offset`. +func (s *_decayPlacementGeoLocationDistance) Offset(distance string) *_decayPlacementGeoLocationDistance { + + s.v.Offset = &distance + + return s +} + +// The point of origin used for calculating distance. Must be given as a number +// for numeric field, date for date fields and geo point for geo fields. +func (s *_decayPlacementGeoLocationDistance) Origin(geolocation types.GeoLocationVariant) *_decayPlacementGeoLocationDistance { + + s.v.Origin = *geolocation.GeoLocationCaster() + + return s +} + +// Defines the distance from origin + offset at which the computed score will +// equal `decay` parameter. 
+func (s *_decayPlacementGeoLocationDistance) Scale(distance string) *_decayPlacementGeoLocationDistance { + + s.v.Scale = &distance + + return s +} + +func (s *_decayPlacementGeoLocationDistance) DecayPlacementGeoLocationDistanceCaster() *types.DecayPlacementGeoLocationDistance { + return s.v +} diff --git a/typedapi/esdsl/definition.go b/typedapi/esdsl/definition.go new file mode 100644 index 0000000000..fae4faf393 --- /dev/null +++ b/typedapi/esdsl/definition.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _definition struct { + v *types.Definition +} + +func NewDefinition(trainedmodel types.TrainedModelVariant) *_definition { + + tmp := &_definition{v: types.NewDefinition()} + + tmp.TrainedModel(trainedmodel) + + return tmp + +} + +// Collection of preprocessors +func (s *_definition) Preprocessors(preprocessors ...types.PreprocessorVariant) *_definition { + + for _, v := range preprocessors { + + s.v.Preprocessors = append(s.v.Preprocessors, *v.PreprocessorCaster()) + + } + return s +} + +// The definition of the trained model. +func (s *_definition) TrainedModel(trainedmodel types.TrainedModelVariant) *_definition { + + s.v.TrainedModel = *trainedmodel.TrainedModelCaster() + + return s +} + +func (s *_definition) DefinitionCaster() *types.Definition { + return s.v +} diff --git a/typedapi/esdsl/delayeddatacheckconfig.go b/typedapi/esdsl/delayeddatacheckconfig.go new file mode 100644 index 0000000000..efe6679f4a --- /dev/null +++ b/typedapi/esdsl/delayeddatacheckconfig.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _delayedDataCheckConfig struct { + v *types.DelayedDataCheckConfig +} + +func NewDelayedDataCheckConfig(enabled bool) *_delayedDataCheckConfig { + + tmp := &_delayedDataCheckConfig{v: types.NewDelayedDataCheckConfig()} + + tmp.Enabled(enabled) + + return tmp + +} + +// The window of time that is searched for late data. This window of time ends +// with the latest finalized bucket. +// It defaults to null, which causes an appropriate `check_window` to be +// calculated when the real-time datafeed runs. +// In particular, the default `check_window` span calculation is based on the +// maximum of `2h` or `8 * bucket_span`. +func (s *_delayedDataCheckConfig) CheckWindow(duration types.DurationVariant) *_delayedDataCheckConfig { + + s.v.CheckWindow = *duration.DurationCaster() + + return s +} + +// Specifies whether the datafeed periodically checks for delayed data. +func (s *_delayedDataCheckConfig) Enabled(enabled bool) *_delayedDataCheckConfig { + + s.v.Enabled = enabled + + return s +} + +func (s *_delayedDataCheckConfig) DelayedDataCheckConfigCaster() *types.DelayedDataCheckConfig { + return s.v +} diff --git a/typedapi/esdsl/deleteaction.go b/typedapi/esdsl/deleteaction.go new file mode 100644 index 0000000000..ae5b0a9a6d --- /dev/null +++ b/typedapi/esdsl/deleteaction.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _deleteAction struct { + v *types.DeleteAction +} + +func NewDeleteAction() *_deleteAction { + + return &_deleteAction{v: types.NewDeleteAction()} + +} + +func (s *_deleteAction) DeleteSearchableSnapshot(deletesearchablesnapshot bool) *_deleteAction { + + s.v.DeleteSearchableSnapshot = &deletesearchablesnapshot + + return s +} + +func (s *_deleteAction) DeleteActionCaster() *types.DeleteAction { + return s.v +} diff --git a/typedapi/esdsl/deleteoperation.go b/typedapi/esdsl/deleteoperation.go new file mode 100644 index 0000000000..9f93a97909 --- /dev/null +++ b/typedapi/esdsl/deleteoperation.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _deleteOperation struct { + v *types.DeleteOperation +} + +// Remove the specified document from the index. +func NewDeleteOperation() *_deleteOperation { + + return &_deleteOperation{v: types.NewDeleteOperation()} + +} + +// The document ID. +func (s *_deleteOperation) Id_(id string) *_deleteOperation { + + s.v.Id_ = &id + + return s +} + +func (s *_deleteOperation) IfPrimaryTerm(ifprimaryterm int64) *_deleteOperation { + + s.v.IfPrimaryTerm = &ifprimaryterm + + return s +} + +func (s *_deleteOperation) IfSeqNo(sequencenumber int64) *_deleteOperation { + + s.v.IfSeqNo = &sequencenumber + + return s +} + +// The name of the index or index alias to perform the action on. +func (s *_deleteOperation) Index_(indexname string) *_deleteOperation { + + s.v.Index_ = &indexname + + return s +} + +// A custom value used to route operations to a specific shard. 
+func (s *_deleteOperation) Routing(routing string) *_deleteOperation { + + s.v.Routing = &routing + + return s +} + +func (s *_deleteOperation) Version(versionnumber int64) *_deleteOperation { + + s.v.Version = &versionnumber + + return s +} + +func (s *_deleteOperation) VersionType(versiontype versiontype.VersionType) *_deleteOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_deleteOperation) OperationContainerCaster() *types.OperationContainer { + container := types.NewOperationContainer() + + container.Delete = s.v + + return container +} + +func (s *_deleteOperation) DeleteOperationCaster() *types.DeleteOperation { + return s.v +} diff --git a/typedapi/esdsl/delimitedpayloadtokenfilter.go b/typedapi/esdsl/delimitedpayloadtokenfilter.go new file mode 100644 index 0000000000..5aec4a35a3 --- /dev/null +++ b/typedapi/esdsl/delimitedpayloadtokenfilter.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding" +) + +type _delimitedPayloadTokenFilter struct { + v *types.DelimitedPayloadTokenFilter +} + +func NewDelimitedPayloadTokenFilter() *_delimitedPayloadTokenFilter { + + return &_delimitedPayloadTokenFilter{v: types.NewDelimitedPayloadTokenFilter()} + +} + +func (s *_delimitedPayloadTokenFilter) Delimiter(delimiter string) *_delimitedPayloadTokenFilter { + + s.v.Delimiter = &delimiter + + return s +} + +func (s *_delimitedPayloadTokenFilter) Encoding(encoding delimitedpayloadencoding.DelimitedPayloadEncoding) *_delimitedPayloadTokenFilter { + + s.v.Encoding = &encoding + return s +} + +func (s *_delimitedPayloadTokenFilter) Version(versionstring string) *_delimitedPayloadTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_delimitedPayloadTokenFilter) DelimitedPayloadTokenFilterCaster() *types.DelimitedPayloadTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/densevectorindexoptions.go b/typedapi/esdsl/densevectorindexoptions.go new file mode 100644 index 0000000000..ef6e418451 --- /dev/null +++ b/typedapi/esdsl/densevectorindexoptions.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorindexoptionstype" +) + +type _denseVectorIndexOptions struct { + v *types.DenseVectorIndexOptions +} + +func NewDenseVectorIndexOptions(type_ densevectorindexoptionstype.DenseVectorIndexOptionsType) *_denseVectorIndexOptions { + + tmp := &_denseVectorIndexOptions{v: types.NewDenseVectorIndexOptions()} + + tmp.Type(type_) + + return tmp + +} + +// The confidence interval to use when quantizing the vectors. Can be any value +// between and including `0.90` and +// `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic +// quantiles should be calculated for +// optimized quantization. When between `0.90` and `1.0`, this value restricts +// the values used when calculating +// the quantization thresholds. +// +// For example, a value of `0.95` will only use the middle `95%` of the values +// when calculating the quantization +// thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). +// +// Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` +// for dynamic quantile calculation. +// +// Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` +// index types. 
+func (s *_denseVectorIndexOptions) ConfidenceInterval(confidenceinterval float32) *_denseVectorIndexOptions { + + s.v.ConfidenceInterval = &confidenceinterval + + return s +} + +// The number of candidates to track while assembling the list of nearest +// neighbors for each new node. +// +// Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. +func (s *_denseVectorIndexOptions) EfConstruction(efconstruction int) *_denseVectorIndexOptions { + + s.v.EfConstruction = &efconstruction + + return s +} + +// The number of neighbors each node will be connected to in the HNSW graph. +// +// Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. +func (s *_denseVectorIndexOptions) M(m int) *_denseVectorIndexOptions { + + s.v.M = &m + + return s +} + +// The type of kNN algorithm to use. +func (s *_denseVectorIndexOptions) Type(type_ densevectorindexoptionstype.DenseVectorIndexOptionsType) *_denseVectorIndexOptions { + + s.v.Type = type_ + return s +} + +func (s *_denseVectorIndexOptions) DenseVectorIndexOptionsCaster() *types.DenseVectorIndexOptions { + return s.v +} diff --git a/typedapi/esdsl/densevectorproperty.go b/typedapi/esdsl/densevectorproperty.go new file mode 100644 index 0000000000..7fb0c570a6 --- /dev/null +++ b/typedapi/esdsl/densevectorproperty.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorelementtype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorsimilarity" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _denseVectorProperty struct { + v *types.DenseVectorProperty +} + +func NewDenseVectorProperty() *_denseVectorProperty { + + return &_denseVectorProperty{v: types.NewDenseVectorProperty()} + +} + +// Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, +// it will be set to the length of +// the first vector added to the field. +func (s *_denseVectorProperty) Dims(dims int) *_denseVectorProperty { + + s.v.Dims = &dims + + return s +} + +func (s *_denseVectorProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_denseVectorProperty { + + s.v.Dynamic = &dynamic + return s +} + +// The data type used to encode vectors. The supported data types are `float` +// (default), `byte`, and `bit`. 
+func (s *_denseVectorProperty) ElementType(elementtype densevectorelementtype.DenseVectorElementType) *_denseVectorProperty { + + s.v.ElementType = &elementtype + return s +} + +func (s *_denseVectorProperty) Fields(fields map[string]types.Property) *_denseVectorProperty { + + s.v.Fields = fields + return s +} + +func (s *_denseVectorProperty) AddField(key string, value types.PropertyVariant) *_denseVectorProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_denseVectorProperty) IgnoreAbove(ignoreabove int) *_denseVectorProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// If `true`, you can search this field using the kNN search API. +func (s *_denseVectorProperty) Index(index bool) *_denseVectorProperty { + + s.v.Index = &index + + return s +} + +// An optional section that configures the kNN indexing algorithm. The HNSW +// algorithm has two internal parameters +// that influence how the data structure is built. These can be adjusted to +// improve the accuracy of results, at the +// expense of slower indexing speed. +// +// This parameter can only be specified when `index` is `true`. +func (s *_denseVectorProperty) IndexOptions(indexoptions types.DenseVectorIndexOptionsVariant) *_denseVectorProperty { + + s.v.IndexOptions = indexoptions.DenseVectorIndexOptionsCaster() + + return s +} + +// Metadata about the field. 
+func (s *_denseVectorProperty) Meta(meta map[string]string) *_denseVectorProperty { + + s.v.Meta = meta + return s +} + +func (s *_denseVectorProperty) AddMeta(key string, value string) *_denseVectorProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_denseVectorProperty) Properties(properties map[string]types.Property) *_denseVectorProperty { + + s.v.Properties = properties + return s +} + +func (s *_denseVectorProperty) AddProperty(key string, value types.PropertyVariant) *_denseVectorProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +// The vector similarity metric to use in kNN search. +// +// Documents are ranked by their vector field's similarity to the query vector. +// The `_score` of each document will +// be derived from the similarity, in a way that ensures scores are positive and +// that a larger score corresponds +// to a higher ranking. +// +// Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to +// `cosine`. +// +// `bit` vectors only support `l2_norm` as their similarity metric. +// +// This parameter can only be specified when `index` is `true`. 
+func (s *_denseVectorProperty) Similarity(similarity densevectorsimilarity.DenseVectorSimilarity) *_denseVectorProperty { + + s.v.Similarity = &similarity + return s +} + +func (s *_denseVectorProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_denseVectorProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_denseVectorProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_denseVectorProperty) DenseVectorPropertyCaster() *types.DenseVectorProperty { + return s.v +} diff --git a/typedapi/esdsl/dependency.go b/typedapi/esdsl/dependency.go new file mode 100644 index 0000000000..c8378537cc --- /dev/null +++ b/typedapi/esdsl/dependency.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dependency struct { + v *types.Dependency +} + +func NewDependency(field string) *_dependency { + + tmp := &_dependency{v: types.NewDependency()} + + tmp.Field(field) + + return tmp + +} + +func (s *_dependency) Field(field string) *_dependency { + + s.v.Field = field + + return s +} + +func (s *_dependency) Value(scalarvalue types.ScalarValueVariant) *_dependency { + + s.v.Value = *scalarvalue.ScalarValueCaster() + + return s +} + +func (s *_dependency) DependencyCaster() *types.Dependency { + return s.v +} diff --git a/typedapi/esdsl/derivativeaggregation.go b/typedapi/esdsl/derivativeaggregation.go new file mode 100644 index 0000000000..b1622dc9b8 --- /dev/null +++ b/typedapi/esdsl/derivativeaggregation.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _derivativeAggregation struct { + v *types.DerivativeAggregation +} + +// A parent pipeline aggregation which calculates the derivative of a specified +// metric in a parent `histogram` or `date_histogram` aggregation. +func NewDerivativeAggregation() *_derivativeAggregation { + + return &_derivativeAggregation{v: types.NewDerivativeAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_derivativeAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_derivativeAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_derivativeAggregation) Format(format string) *_derivativeAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_derivativeAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_derivativeAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_derivativeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Derivative = s.v + + return container +} + +func (s *_derivativeAggregation) DerivativeAggregationCaster() *types.DerivativeAggregation { + return s.v +} diff --git a/typedapi/esdsl/detectionrule.go b/typedapi/esdsl/detectionrule.go new file mode 100644 index 0000000000..43b6cfba19 --- /dev/null +++ b/typedapi/esdsl/detectionrule.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ruleaction" +) + +type _detectionRule struct { + v *types.DetectionRule +} + +func NewDetectionRule() *_detectionRule { + + return &_detectionRule{v: types.NewDetectionRule()} + +} + +// The set of actions to be triggered when the rule applies. If more than one +// action is specified the effects of all actions are combined. +func (s *_detectionRule) Actions(actions ...ruleaction.RuleAction) *_detectionRule { + + for _, v := range actions { + + s.v.Actions = append(s.v.Actions, v) + + } + return s +} + +// An array of numeric conditions when the rule applies. A rule must either have +// a non-empty scope or at least one condition. Multiple conditions are combined +// together with a logical AND. 
+func (s *_detectionRule) Conditions(conditions ...types.RuleConditionVariant) *_detectionRule { + + for _, v := range conditions { + + s.v.Conditions = append(s.v.Conditions, *v.RuleConditionCaster()) + + } + return s +} + +// A scope of series where the rule applies. A rule must either have a non-empty +// scope or at least one condition. By default, the scope includes all series. +// Scoping is allowed for any of the fields that are also specified in +// `by_field_name`, `over_field_name`, or `partition_field_name`. +func (s *_detectionRule) Scope(scope map[string]types.FilterRef) *_detectionRule { + + s.v.Scope = scope + return s +} + +func (s *_detectionRule) AddScope(key string, value types.FilterRefVariant) *_detectionRule { + + var tmp map[string]types.FilterRef + if s.v.Scope == nil { + s.v.Scope = make(map[string]types.FilterRef) + } else { + tmp = s.v.Scope + } + + tmp[key] = *value.FilterRefCaster() + + s.v.Scope = tmp + return s +} + +func (s *_detectionRule) DetectionRuleCaster() *types.DetectionRule { + return s.v +} diff --git a/typedapi/esdsl/detector.go b/typedapi/esdsl/detector.go new file mode 100644 index 0000000000..0a82c1d9f4 --- /dev/null +++ b/typedapi/esdsl/detector.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent" +) + +type _detector struct { + v *types.Detector +} + +func NewDetector() *_detector { + + return &_detector{v: types.NewDetector()} + +} + +// The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to their own history. It is used for +// finding unusual values in the context of the split. +func (s *_detector) ByFieldName(field string) *_detector { + + s.v.ByFieldName = &field + + return s +} + +// Custom rules enable you to customize the way detectors operate. For example, +// a rule may dictate conditions under which results should be skipped. Kibana +// refers to custom rules as job rules. +func (s *_detector) CustomRules(customrules ...types.DetectionRuleVariant) *_detector { + + for _, v := range customrules { + + s.v.CustomRules = append(s.v.CustomRules, *v.DetectionRuleCaster()) + + } + return s +} + +// A description of the detector. +func (s *_detector) DetectorDescription(detectordescription string) *_detector { + + s.v.DetectorDescription = &detectordescription + + return s +} + +// A unique identifier for the detector. This identifier is based on the order +// of the detectors in the `analysis_config`, starting at zero. If you specify a +// value for this property, it is ignored. +func (s *_detector) DetectorIndex(detectorindex int) *_detector { + + s.v.DetectorIndex = &detectorindex + + return s +} + +// If set, frequent entities are excluded from influencing the anomaly results. 
+// Entities can be considered frequent over time or frequent in a population. If +// you are working with both over and by fields, you can set `exclude_frequent` +// to `all` for both fields, or to `by` or `over` for those specific fields. +func (s *_detector) ExcludeFrequent(excludefrequent excludefrequent.ExcludeFrequent) *_detector { + + s.v.ExcludeFrequent = &excludefrequent + return s +} + +// The field that the detector uses in the function. If you use an event rate +// function such as count or rare, do not specify this field. The `field_name` +// cannot contain double quotes or backslashes. +func (s *_detector) FieldName(field string) *_detector { + + s.v.FieldName = &field + + return s +} + +// The analysis function that is used. For example, `count`, `rare`, `mean`, +// `min`, `max`, or `sum`. +func (s *_detector) Function(function string) *_detector { + + s.v.Function = &function + + return s +} + +// The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to the history of all splits. It is used +// for finding unusual values in the population of all splits. +func (s *_detector) OverFieldName(field string) *_detector { + + s.v.OverFieldName = &field + + return s +} + +// The field used to segment the analysis. When you use this property, you have +// completely independent baselines for each value of this field. +func (s *_detector) PartitionFieldName(field string) *_detector { + + s.v.PartitionFieldName = &field + + return s +} + +// Defines whether a new series is used as the null series when there is no +// value for the by or partition fields. 
+func (s *_detector) UseNull(usenull bool) *_detector { + + s.v.UseNull = &usenull + + return s +} + +func (s *_detector) DetectorCaster() *types.Detector { + return s.v +} diff --git a/typedapi/esdsl/detectorupdate.go b/typedapi/esdsl/detectorupdate.go new file mode 100644 index 0000000000..cb08951249 --- /dev/null +++ b/typedapi/esdsl/detectorupdate.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _detectorUpdate struct { + v *types.DetectorUpdate +} + +func NewDetectorUpdate(detectorindex int) *_detectorUpdate { + + tmp := &_detectorUpdate{v: types.NewDetectorUpdate()} + + tmp.DetectorIndex(detectorindex) + + return tmp + +} + +// An array of custom rule objects, which enable you to customize the way +// detectors operate. +// For example, a rule may dictate to the detector conditions under which +// results should be skipped. +// Kibana refers to custom rules as job rules. 
+func (s *_detectorUpdate) CustomRules(customrules ...types.DetectionRuleVariant) *_detectorUpdate { + + for _, v := range customrules { + + s.v.CustomRules = append(s.v.CustomRules, *v.DetectionRuleCaster()) + + } + return s +} + +// A description of the detector. +func (s *_detectorUpdate) Description(description string) *_detectorUpdate { + + s.v.Description = &description + + return s +} + +// A unique identifier for the detector. +// This identifier is based on the order of the detectors in the +// `analysis_config`, starting at zero. +func (s *_detectorUpdate) DetectorIndex(detectorindex int) *_detectorUpdate { + + s.v.DetectorIndex = detectorindex + + return s +} + +func (s *_detectorUpdate) DetectorUpdateCaster() *types.DetectorUpdate { + return s.v +} diff --git a/typedapi/esdsl/dictionarydecompoundertokenfilter.go b/typedapi/esdsl/dictionarydecompoundertokenfilter.go new file mode 100644 index 0000000000..fb868061cd --- /dev/null +++ b/typedapi/esdsl/dictionarydecompoundertokenfilter.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dictionaryDecompounderTokenFilter struct { + v *types.DictionaryDecompounderTokenFilter +} + +func NewDictionaryDecompounderTokenFilter() *_dictionaryDecompounderTokenFilter { + + return &_dictionaryDecompounderTokenFilter{v: types.NewDictionaryDecompounderTokenFilter()} + +} + +func (s *_dictionaryDecompounderTokenFilter) HyphenationPatternsPath(hyphenationpatternspath string) *_dictionaryDecompounderTokenFilter { + + s.v.HyphenationPatternsPath = &hyphenationpatternspath + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) MaxSubwordSize(maxsubwordsize int) *_dictionaryDecompounderTokenFilter { + + s.v.MaxSubwordSize = &maxsubwordsize + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) MinSubwordSize(minsubwordsize int) *_dictionaryDecompounderTokenFilter { + + s.v.MinSubwordSize = &minsubwordsize + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) MinWordSize(minwordsize int) *_dictionaryDecompounderTokenFilter { + + s.v.MinWordSize = &minwordsize + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) OnlyLongestMatch(onlylongestmatch bool) *_dictionaryDecompounderTokenFilter { + + s.v.OnlyLongestMatch = &onlylongestmatch + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) Version(versionstring string) *_dictionaryDecompounderTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) WordList(wordlists ...string) *_dictionaryDecompounderTokenFilter { + + for _, v := range wordlists { + + s.v.WordList = append(s.v.WordList, v) + + } + return s +} + +func (s *_dictionaryDecompounderTokenFilter) WordListPath(wordlistpath string) *_dictionaryDecompounderTokenFilter { + + s.v.WordListPath = &wordlistpath + + return s +} + +func (s *_dictionaryDecompounderTokenFilter) 
DictionaryDecompounderTokenFilterCaster() *types.DictionaryDecompounderTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/directgenerator.go b/typedapi/esdsl/directgenerator.go new file mode 100644 index 0000000000..845a7efc44 --- /dev/null +++ b/typedapi/esdsl/directgenerator.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" +) + +type _directGenerator struct { + v *types.DirectGenerator +} + +func NewDirectGenerator() *_directGenerator { + + return &_directGenerator{v: types.NewDirectGenerator()} + +} + +// The field to fetch the candidate suggestions from. +// Needs to be set globally or per suggestion. +func (s *_directGenerator) Field(field string) *_directGenerator { + + s.v.Field = field + + return s +} + +// The maximum edit distance candidate suggestions can have in order to be +// considered as a suggestion. +// Can only be `1` or `2`. 
+func (s *_directGenerator) MaxEdits(maxedits int) *_directGenerator { + + s.v.MaxEdits = &maxedits + + return s +} + +// A factor that is used to multiply with the shard_size in order to inspect +// more candidate spelling corrections on the shard level. +// Can improve accuracy at the cost of performance. +func (s *_directGenerator) MaxInspections(maxinspections float32) *_directGenerator { + + s.v.MaxInspections = &maxinspections + + return s +} + +// The maximum threshold in number of documents in which a suggest text token +// can exist in order to be included. +// This can be used to exclude high frequency terms — which are usually spelled +// correctly — from being spellchecked. +// Can be a relative percentage number (for example `0.4`) or an absolute number +// to represent document frequencies. +// If a value higher than 1 is specified, then fractional can not be specified. +func (s *_directGenerator) MaxTermFreq(maxtermfreq float32) *_directGenerator { + + s.v.MaxTermFreq = &maxtermfreq + + return s +} + +// The minimal threshold in number of documents a suggestion should appear in. +// This can improve quality by only suggesting high frequency terms. +// Can be specified as an absolute number or as a relative percentage of number +// of documents. +// If a value higher than 1 is specified, the number cannot be fractional. +func (s *_directGenerator) MinDocFreq(mindocfreq float32) *_directGenerator { + + s.v.MinDocFreq = &mindocfreq + + return s +} + +// The minimum length a suggest text term must have in order to be included. +func (s *_directGenerator) MinWordLength(minwordlength int) *_directGenerator { + + s.v.MinWordLength = &minwordlength + + return s +} + +// A filter (analyzer) that is applied to each of the generated tokens before +// they are passed to the actual phrase scorer. 
+func (s *_directGenerator) PostFilter(postfilter string) *_directGenerator { + + s.v.PostFilter = &postfilter + + return s +} + +// A filter (analyzer) that is applied to each of the tokens passed to this +// candidate generator. +// This filter is applied to the original token before candidates are generated. +func (s *_directGenerator) PreFilter(prefilter string) *_directGenerator { + + s.v.PreFilter = &prefilter + + return s +} + +// The number of minimal prefix characters that must match in order be a +// candidate suggestions. +// Increasing this number improves spellcheck performance. +func (s *_directGenerator) PrefixLength(prefixlength int) *_directGenerator { + + s.v.PrefixLength = &prefixlength + + return s +} + +// The maximum corrections to be returned per suggest text token. +func (s *_directGenerator) Size(size int) *_directGenerator { + + s.v.Size = &size + + return s +} + +// Controls what suggestions are included on the suggestions generated on each +// shard. +func (s *_directGenerator) SuggestMode(suggestmode suggestmode.SuggestMode) *_directGenerator { + + s.v.SuggestMode = &suggestmode + return s +} + +func (s *_directGenerator) DirectGeneratorCaster() *types.DirectGenerator { + return s.v +} diff --git a/typedapi/esdsl/dismaxquery.go b/typedapi/esdsl/dismaxquery.go new file mode 100644 index 0000000000..6178d6ecee --- /dev/null +++ b/typedapi/esdsl/dismaxquery.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _disMaxQuery struct { + v *types.DisMaxQuery +} + +// Returns documents matching one or more wrapped queries, called query clauses +// or clauses. +// If a returned document matches multiple query clauses, the `dis_max` query +// assigns the document the highest relevance score from any matching clause, +// plus a tie breaking increment for any additional matching subqueries. +func NewDisMaxQuery() *_disMaxQuery { + + return &_disMaxQuery{v: types.NewDisMaxQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_disMaxQuery) Boost(boost float32) *_disMaxQuery { + + s.v.Boost = &boost + + return s +} + +// One or more query clauses. +// Returned documents must match one or more of these queries. +// If a document matches multiple queries, Elasticsearch uses the highest +// relevance score. 
+func (s *_disMaxQuery) Queries(queries ...types.QueryVariant) *_disMaxQuery { + + for _, v := range queries { + + s.v.Queries = append(s.v.Queries, *v.QueryCaster()) + + } + return s +} + +func (s *_disMaxQuery) QueryName_(queryname_ string) *_disMaxQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Floating point number between 0 and 1.0 used to increase the relevance scores +// of documents matching multiple query clauses. +func (s *_disMaxQuery) TieBreaker(tiebreaker types.Float64) *_disMaxQuery { + + s.v.TieBreaker = &tiebreaker + + return s +} + +func (s *_disMaxQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.DisMax = s.v + + return container +} + +func (s *_disMaxQuery) DisMaxQueryCaster() *types.DisMaxQuery { + return s.v +} diff --git a/typedapi/esdsl/dissectprocessor.go b/typedapi/esdsl/dissectprocessor.go new file mode 100644 index 0000000000..2633369126 --- /dev/null +++ b/typedapi/esdsl/dissectprocessor.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dissectProcessor struct { + v *types.DissectProcessor +} + +// Extracts structured fields out of a single text field by matching the text +// field against a delimiter-based pattern. +func NewDissectProcessor(pattern string) *_dissectProcessor { + + tmp := &_dissectProcessor{v: types.NewDissectProcessor()} + + tmp.Pattern(pattern) + + return tmp + +} + +// The character(s) that separate the appended fields. +func (s *_dissectProcessor) AppendSeparator(appendseparator string) *_dissectProcessor { + + s.v.AppendSeparator = &appendseparator + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_dissectProcessor) Description(description string) *_dissectProcessor { + + s.v.Description = &description + + return s +} + +// The field to dissect. +func (s *_dissectProcessor) Field(field string) *_dissectProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_dissectProcessor) If(if_ types.ScriptVariant) *_dissectProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_dissectProcessor) IgnoreFailure(ignorefailure bool) *_dissectProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_dissectProcessor) IgnoreMissing(ignoremissing bool) *_dissectProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. 
+func (s *_dissectProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_dissectProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// The pattern to apply to the field. +func (s *_dissectProcessor) Pattern(pattern string) *_dissectProcessor { + + s.v.Pattern = pattern + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_dissectProcessor) Tag(tag string) *_dissectProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_dissectProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Dissect = s.v + + return container +} + +func (s *_dissectProcessor) DissectProcessorCaster() *types.DissectProcessor { + return s.v +} diff --git a/typedapi/esdsl/distancefeaturequery.go b/typedapi/esdsl/distancefeaturequery.go new file mode 100644 index 0000000000..d378ed36e2 --- /dev/null +++ b/typedapi/esdsl/distancefeaturequery.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _distanceFeatureQuery struct { + v types.DistanceFeatureQuery +} + +func NewDistanceFeatureQuery() *_distanceFeatureQuery { + return &_distanceFeatureQuery{v: nil} +} + +func (u *_distanceFeatureQuery) UntypedDistanceFeatureQuery(untypeddistancefeaturequery types.UntypedDistanceFeatureQueryVariant) *_distanceFeatureQuery { + + u.v = &untypeddistancefeaturequery + + return u +} + +// Interface implementation for UntypedDistanceFeatureQuery in DistanceFeatureQuery union +func (u *_untypedDistanceFeatureQuery) DistanceFeatureQueryCaster() *types.DistanceFeatureQuery { + t := types.DistanceFeatureQuery(u.v) + return &t +} + +func (u *_distanceFeatureQuery) GeoDistanceFeatureQuery(geodistancefeaturequery types.GeoDistanceFeatureQueryVariant) *_distanceFeatureQuery { + + u.v = &geodistancefeaturequery + + return u +} + +// Interface implementation for GeoDistanceFeatureQuery in DistanceFeatureQuery union +func (u *_geoDistanceFeatureQuery) DistanceFeatureQueryCaster() *types.DistanceFeatureQuery { + t := types.DistanceFeatureQuery(u.v) + return &t +} + +func (u *_distanceFeatureQuery) DateDistanceFeatureQuery(datedistancefeaturequery types.DateDistanceFeatureQueryVariant) *_distanceFeatureQuery { + + u.v = &datedistancefeaturequery + + return u +} + +// Interface implementation for DateDistanceFeatureQuery in DistanceFeatureQuery union +func (u *_dateDistanceFeatureQuery) DistanceFeatureQueryCaster() *types.DistanceFeatureQuery { + t := types.DistanceFeatureQuery(u.v) + return &t +} + +func (u *_distanceFeatureQuery) DistanceFeatureQueryCaster() *types.DistanceFeatureQuery { + return &u.v +} diff --git a/typedapi/esdsl/diversifiedsampleraggregation.go b/typedapi/esdsl/diversifiedsampleraggregation.go new file mode 
100644 index 0000000000..512fc87bf0 --- /dev/null +++ b/typedapi/esdsl/diversifiedsampleraggregation.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint" +) + +type _diversifiedSamplerAggregation struct { + v *types.DiversifiedSamplerAggregation +} + +// A filtering aggregation used to limit any sub aggregations' processing to a +// sample of the top-scoring documents. +// Similar to the `sampler` aggregation, but adds the ability to limit the +// number of matches that share a common value. +func NewDiversifiedSamplerAggregation() *_diversifiedSamplerAggregation { + + return &_diversifiedSamplerAggregation{v: types.NewDiversifiedSamplerAggregation()} + +} + +// The type of value used for de-duplication. 
+func (s *_diversifiedSamplerAggregation) ExecutionHint(executionhint sampleraggregationexecutionhint.SamplerAggregationExecutionHint) *_diversifiedSamplerAggregation { + + s.v.ExecutionHint = &executionhint + return s +} + +// The field used to provide values used for de-duplication. +func (s *_diversifiedSamplerAggregation) Field(field string) *_diversifiedSamplerAggregation { + + s.v.Field = &field + + return s +} + +// Limits how many documents are permitted per choice of de-duplicating value. +func (s *_diversifiedSamplerAggregation) MaxDocsPerValue(maxdocspervalue int) *_diversifiedSamplerAggregation { + + s.v.MaxDocsPerValue = &maxdocspervalue + + return s +} + +func (s *_diversifiedSamplerAggregation) Script(script types.ScriptVariant) *_diversifiedSamplerAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Limits how many top-scoring documents are collected in the sample processed +// on each shard. +func (s *_diversifiedSamplerAggregation) ShardSize(shardsize int) *_diversifiedSamplerAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +func (s *_diversifiedSamplerAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.DiversifiedSampler = s.v + + return container +} + +func (s *_diversifiedSamplerAggregation) DiversifiedSamplerAggregationCaster() *types.DiversifiedSamplerAggregation { + return s.v +} diff --git a/typedapi/esdsl/document.go b/typedapi/esdsl/document.go new file mode 100644 index 0000000000..31fddc84d8 --- /dev/null +++ b/typedapi/esdsl/document.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _document struct { + v *types.Document +} + +func NewDocument(source_ json.RawMessage) *_document { + + tmp := &_document{v: types.NewDocument()} + + tmp.Source_(source_) + + return tmp + +} + +// Unique identifier for the document. +// This ID must be unique within the `_index`. +func (s *_document) Id_(id string) *_document { + + s.v.Id_ = &id + + return s +} + +// Name of the index containing the document. +func (s *_document) Index_(indexname string) *_document { + + s.v.Index_ = &indexname + + return s +} + +// JSON body for the document. +func (s *_document) Source_(source_ json.RawMessage) *_document { + + s.v.Source_ = source_ + + return s +} + +func (s *_document) DocumentCaster() *types.Document { + return s.v +} diff --git a/typedapi/esdsl/documentrating.go b/typedapi/esdsl/documentrating.go new file mode 100644 index 0000000000..a92e2c6bae --- /dev/null +++ b/typedapi/esdsl/documentrating.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _documentRating struct { + v *types.DocumentRating +} + +func NewDocumentRating(rating int) *_documentRating { + + tmp := &_documentRating{v: types.NewDocumentRating()} + + tmp.Rating(rating) + + return tmp + +} + +// The document ID. +func (s *_documentRating) Id_(id string) *_documentRating { + + s.v.Id_ = id + + return s +} + +// The document’s index. For data streams, this should be the document’s backing +// index. +func (s *_documentRating) Index_(indexname string) *_documentRating { + + s.v.Index_ = indexname + + return s +} + +// The document’s relevance with regard to this search request. +func (s *_documentRating) Rating(rating int) *_documentRating { + + s.v.Rating = rating + + return s +} + +func (s *_documentRating) DocumentRatingCaster() *types.DocumentRating { + return s.v +} diff --git a/typedapi/esdsl/dotexpanderprocessor.go b/typedapi/esdsl/dotexpanderprocessor.go new file mode 100644 index 0000000000..3af085e3c4 --- /dev/null +++ b/typedapi/esdsl/dotexpanderprocessor.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dotExpanderProcessor struct { + v *types.DotExpanderProcessor +} + +// Expands a field with dots into an object field. +// This processor allows fields with dots in the name to be accessible by other +// processors in the pipeline. +// Otherwise these fields can’t be accessed by any processor. +func NewDotExpanderProcessor() *_dotExpanderProcessor { + + return &_dotExpanderProcessor{v: types.NewDotExpanderProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_dotExpanderProcessor) Description(description string) *_dotExpanderProcessor { + + s.v.Description = &description + + return s +} + +// The field to expand into an object field. +// If set to `*`, all top-level fields will be expanded. +func (s *_dotExpanderProcessor) Field(field string) *_dotExpanderProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. 
+func (s *_dotExpanderProcessor) If(if_ types.ScriptVariant) *_dotExpanderProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_dotExpanderProcessor) IgnoreFailure(ignorefailure bool) *_dotExpanderProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_dotExpanderProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_dotExpanderProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Controls the behavior when there is already an existing nested object that +// conflicts with the expanded field. +// When `false`, the processor will merge conflicts by combining the old and the +// new values into an array. +// When `true`, the value from the expanded field will overwrite the existing +// value. +func (s *_dotExpanderProcessor) Override(override bool) *_dotExpanderProcessor { + + s.v.Override = &override + + return s +} + +// The field that contains the field to expand. +// Only required if the field to expand is part another object field, because +// the `field` option can only understand leaf fields. +func (s *_dotExpanderProcessor) Path(path string) *_dotExpanderProcessor { + + s.v.Path = &path + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_dotExpanderProcessor) Tag(tag string) *_dotExpanderProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_dotExpanderProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.DotExpander = s.v + + return container +} + +func (s *_dotExpanderProcessor) DotExpanderProcessorCaster() *types.DotExpanderProcessor { + return s.v +} diff --git a/typedapi/esdsl/doublenumberproperty.go b/typedapi/esdsl/doublenumberproperty.go new file mode 100644 index 0000000000..8bbef173ad --- /dev/null +++ b/typedapi/esdsl/doublenumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _doubleNumberProperty struct { + v *types.DoubleNumberProperty +} + +func NewDoubleNumberProperty() *_doubleNumberProperty { + + return &_doubleNumberProperty{v: types.NewDoubleNumberProperty()} + +} + +func (s *_doubleNumberProperty) Boost(boost types.Float64) *_doubleNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_doubleNumberProperty) Coerce(coerce bool) *_doubleNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_doubleNumberProperty) CopyTo(fields ...string) *_doubleNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_doubleNumberProperty) DocValues(docvalues bool) *_doubleNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_doubleNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_doubleNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_doubleNumberProperty) Fields(fields map[string]types.Property) *_doubleNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_doubleNumberProperty) AddField(key string, value types.PropertyVariant) *_doubleNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_doubleNumberProperty) IgnoreAbove(ignoreabove int) *_doubleNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + 
+func (s *_doubleNumberProperty) IgnoreMalformed(ignoremalformed bool) *_doubleNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_doubleNumberProperty) Index(index bool) *_doubleNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_doubleNumberProperty) Meta(meta map[string]string) *_doubleNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_doubleNumberProperty) AddMeta(key string, value string) *_doubleNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_doubleNumberProperty) NullValue(nullvalue types.Float64) *_doubleNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_doubleNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_doubleNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_doubleNumberProperty) Properties(properties map[string]types.Property) *_doubleNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_doubleNumberProperty) AddProperty(key string, value types.PropertyVariant) *_doubleNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_doubleNumberProperty) Script(script types.ScriptVariant) *_doubleNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_doubleNumberProperty) Store(store bool) *_doubleNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_doubleNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_doubleNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For 
internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_doubleNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_doubleNumberProperty { + + s.v.TimeSeriesDimension = &timeseriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series metric. +func (s *_doubleNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_doubleNumberProperty { + + s.v.TimeSeriesMetric = &timeseriesmetric + return s +}
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _doubleRangeProperty struct { + v *types.DoubleRangeProperty +} + +func NewDoubleRangeProperty() *_doubleRangeProperty { + + return &_doubleRangeProperty{v: types.NewDoubleRangeProperty()} + +} + +func (s *_doubleRangeProperty) Boost(boost types.Float64) *_doubleRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_doubleRangeProperty) Coerce(coerce bool) *_doubleRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_doubleRangeProperty) CopyTo(fields ...string) *_doubleRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_doubleRangeProperty) DocValues(docvalues bool) *_doubleRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_doubleRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_doubleRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_doubleRangeProperty) Fields(fields map[string]types.Property) *_doubleRangeProperty { + + s.v.Fields = fields + return s +} + +func (s *_doubleRangeProperty) AddField(key string, value types.PropertyVariant) *_doubleRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_doubleRangeProperty) IgnoreAbove(ignoreabove int) *_doubleRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_doubleRangeProperty) Index(index bool) *_doubleRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_doubleRangeProperty) Meta(meta map[string]string) *_doubleRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_doubleRangeProperty) AddMeta(key string, value string) *_doubleRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_doubleRangeProperty) Properties(properties map[string]types.Property) *_doubleRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_doubleRangeProperty) AddProperty(key string, value types.PropertyVariant) *_doubleRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_doubleRangeProperty) Store(store bool) *_doubleRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_doubleRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_doubleRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_doubleRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_doubleRangeProperty) DoubleRangePropertyCaster() *types.DoubleRangeProperty { + return s.v +} diff --git a/typedapi/esdsl/downsampleaction.go b/typedapi/esdsl/downsampleaction.go new file mode 100644 index 0000000000..1537a8d65c --- /dev/null +++ b/typedapi/esdsl/downsampleaction.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _downsampleAction struct { + v *types.DownsampleAction +} + +func NewDownsampleAction() *_downsampleAction { + + return &_downsampleAction{v: types.NewDownsampleAction()} + +} + +func (s *_downsampleAction) FixedInterval(durationlarge string) *_downsampleAction { + + s.v.FixedInterval = durationlarge + + return s +} + +func (s *_downsampleAction) WaitTimeout(duration types.DurationVariant) *_downsampleAction { + + s.v.WaitTimeout = *duration.DurationCaster() + + return s +} + +func (s *_downsampleAction) DownsampleActionCaster() *types.DownsampleAction { + return s.v +} diff --git a/typedapi/esdsl/downsampleconfig.go b/typedapi/esdsl/downsampleconfig.go new file mode 100644 index 0000000000..5774efaf30 --- /dev/null +++ b/typedapi/esdsl/downsampleconfig.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _downsampleConfig struct { + v *types.DownsampleConfig +} + +func NewDownsampleConfig() *_downsampleConfig { + + return &_downsampleConfig{v: types.NewDownsampleConfig()} + +} + +// The interval at which to aggregate the original time series index. +func (s *_downsampleConfig) FixedInterval(durationlarge string) *_downsampleConfig { + + s.v.FixedInterval = durationlarge + + return s +} + +func (s *_downsampleConfig) DownsampleConfigCaster() *types.DownsampleConfig { + return s.v +} diff --git a/typedapi/esdsl/downsamplinground.go b/typedapi/esdsl/downsamplinground.go new file mode 100644 index 0000000000..a6d6ea7655 --- /dev/null +++ b/typedapi/esdsl/downsamplinground.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _downsamplingRound struct { + v *types.DownsamplingRound +} + +func NewDownsamplingRound(config types.DownsampleConfigVariant) *_downsamplingRound { + + tmp := &_downsamplingRound{v: types.NewDownsamplingRound()} + + tmp.Config(config) + + return tmp + +} + +// The duration since rollover when this downsampling round should execute +func (s *_downsamplingRound) After(duration types.DurationVariant) *_downsamplingRound { + + s.v.After = *duration.DurationCaster() + + return s +} + +// The downsample configuration to execute. +func (s *_downsamplingRound) Config(config types.DownsampleConfigVariant) *_downsamplingRound { + + s.v.Config = *config.DownsampleConfigCaster() + + return s +} + +func (s *_downsamplingRound) DownsamplingRoundCaster() *types.DownsamplingRound { + return s.v +} diff --git a/typedapi/esdsl/dropprocessor.go b/typedapi/esdsl/dropprocessor.go new file mode 100644 index 0000000000..ae8f120f91 --- /dev/null +++ b/typedapi/esdsl/dropprocessor.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dropProcessor struct { + v *types.DropProcessor +} + +// Drops the document without raising any errors. +// This is useful to prevent the document from getting indexed based on some +// condition. +func NewDropProcessor() *_dropProcessor { + + return &_dropProcessor{v: types.NewDropProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_dropProcessor) Description(description string) *_dropProcessor { + + s.v.Description = &description + + return s +} + +// Conditionally execute the processor. +func (s *_dropProcessor) If(if_ types.ScriptVariant) *_dropProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_dropProcessor) IgnoreFailure(ignorefailure bool) *_dropProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. 
+func (s *_dropProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_dropProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_dropProcessor) Tag(tag string) *_dropProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_dropProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Drop = s.v + + return container +} + +func (s *_dropProcessor) DropProcessorCaster() *types.DropProcessor { + return s.v +} diff --git a/typedapi/esdsl/duration.go b/typedapi/esdsl/duration.go new file mode 100644 index 0000000000..b846da5265 --- /dev/null +++ b/typedapi/esdsl/duration.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union.
+type _duration struct { + v types.Duration +} + +func NewDuration() *_duration { + return &_duration{v: nil} +} + +func (u *_duration) String(string string) *_duration { + + u.v = &string + + return u +} + +func (u *_duration) DurationCaster() *types.Duration { + return &u.v +} diff --git a/typedapi/esdsl/durationvalueunitmillis.go b/typedapi/esdsl/durationvalueunitmillis.go new file mode 100644 index 0000000000..c5b4d3592c --- /dev/null +++ b/typedapi/esdsl/durationvalueunitmillis.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias.
+type _durationValueUnitMillis struct { + v types.DurationValueUnitMillis +} + +func NewDurationValueUnitMillis(durationvalueunitmillis int64) *_durationValueUnitMillis { + + return &_durationValueUnitMillis{v: types.DurationValueUnitMillis(durationvalueunitmillis)} + +} + +func (u *_durationValueUnitMillis) DurationValueUnitMillisCaster() *types.DurationValueUnitMillis { + return &u.v +} diff --git a/typedapi/esdsl/dutchanalyzer.go b/typedapi/esdsl/dutchanalyzer.go new file mode 100644 index 0000000000..7540ec0825 --- /dev/null +++ b/typedapi/esdsl/dutchanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _dutchAnalyzer struct { + v *types.DutchAnalyzer +} + +func NewDutchAnalyzer() *_dutchAnalyzer { + + return &_dutchAnalyzer{v: types.NewDutchAnalyzer()} + +} + +func (s *_dutchAnalyzer) StemExclusion(stemexclusions ...string) *_dutchAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_dutchAnalyzer) Stopwords(stopwords ...string) *_dutchAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_dutchAnalyzer) StopwordsPath(stopwordspath string) *_dutchAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_dutchAnalyzer) DutchAnalyzerCaster() *types.DutchAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/dynamicproperty.go b/typedapi/esdsl/dynamicproperty.go new file mode 100644 index 0000000000..123908ae79 --- /dev/null +++ b/typedapi/esdsl/dynamicproperty.go @@ -0,0 +1,307 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _dynamicProperty struct { + v *types.DynamicProperty +} + +func NewDynamicProperty() *_dynamicProperty { + + return &_dynamicProperty{v: types.NewDynamicProperty()} + +} + +func (s *_dynamicProperty) Analyzer(analyzer string) *_dynamicProperty { + + s.v.Analyzer = &analyzer + + return s +} + +func (s *_dynamicProperty) Boost(boost types.Float64) *_dynamicProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_dynamicProperty) Coerce(coerce bool) *_dynamicProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_dynamicProperty) CopyTo(fields ...string) *_dynamicProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_dynamicProperty) DocValues(docvalues bool) *_dynamicProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_dynamicProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_dynamicProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_dynamicProperty) EagerGlobalOrdinals(eagerglobalordinals bool) *_dynamicProperty { + + s.v.EagerGlobalOrdinals = &eagerglobalordinals + + return s +} + +func (s *_dynamicProperty) Enabled(enabled bool) *_dynamicProperty { + + s.v.Enabled = &enabled + + return s +} + +func (s *_dynamicProperty) Fields(fields map[string]types.Property) *_dynamicProperty { + + s.v.Fields = fields + return s +} + +func 
(s *_dynamicProperty) AddField(key string, value types.PropertyVariant) *_dynamicProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_dynamicProperty) Format(format string) *_dynamicProperty { + + s.v.Format = &format + + return s +} + +func (s *_dynamicProperty) IgnoreAbove(ignoreabove int) *_dynamicProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_dynamicProperty) IgnoreMalformed(ignoremalformed bool) *_dynamicProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_dynamicProperty) Index(index bool) *_dynamicProperty { + + s.v.Index = &index + + return s +} + +func (s *_dynamicProperty) IndexOptions(indexoptions indexoptions.IndexOptions) *_dynamicProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +func (s *_dynamicProperty) IndexPhrases(indexphrases bool) *_dynamicProperty { + + s.v.IndexPhrases = &indexphrases + + return s +} + +func (s *_dynamicProperty) IndexPrefixes(indexprefixes types.TextIndexPrefixesVariant) *_dynamicProperty { + + s.v.IndexPrefixes = indexprefixes.TextIndexPrefixesCaster() + + return s +} + +func (s *_dynamicProperty) Locale(locale string) *_dynamicProperty { + + s.v.Locale = &locale + + return s +} + +// Metadata about the field. 
+func (s *_dynamicProperty) Meta(meta map[string]string) *_dynamicProperty { + + s.v.Meta = meta + return s +} + +func (s *_dynamicProperty) AddMeta(key string, value string) *_dynamicProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_dynamicProperty) Norms(norms bool) *_dynamicProperty { + + s.v.Norms = &norms + + return s +} + +func (s *_dynamicProperty) NullValue(fieldvalue types.FieldValueVariant) *_dynamicProperty { + + s.v.NullValue = *fieldvalue.FieldValueCaster() + + return s +} + +func (s *_dynamicProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_dynamicProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_dynamicProperty) PositionIncrementGap(positionincrementgap int) *_dynamicProperty { + + s.v.PositionIncrementGap = &positionincrementgap + + return s +} + +func (s *_dynamicProperty) PrecisionStep(precisionstep int) *_dynamicProperty { + + s.v.PrecisionStep = &precisionstep + + return s +} + +func (s *_dynamicProperty) Properties(properties map[string]types.Property) *_dynamicProperty { + + s.v.Properties = properties + return s +} + +func (s *_dynamicProperty) AddProperty(key string, value types.PropertyVariant) *_dynamicProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_dynamicProperty) Script(script types.ScriptVariant) *_dynamicProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_dynamicProperty) SearchAnalyzer(searchanalyzer string) *_dynamicProperty { + + s.v.SearchAnalyzer = &searchanalyzer + + return s +} + +func (s *_dynamicProperty) SearchQuoteAnalyzer(searchquoteanalyzer string) *_dynamicProperty { + + s.v.SearchQuoteAnalyzer = 
&searchquoteanalyzer + + return s +} + +func (s *_dynamicProperty) Store(store bool) *_dynamicProperty { + + s.v.Store = &store + + return s +} + +func (s *_dynamicProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_dynamicProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_dynamicProperty) TermVector(termvector termvectoroption.TermVectorOption) *_dynamicProperty { + + s.v.TermVector = &termvector + return s +} + +func (s *_dynamicProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_dynamicProperty { + + s.v.TimeSeriesMetric = &timeseriesmetric + return s +} + +func (s *_dynamicProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_dynamicProperty) DynamicPropertyCaster() *types.DynamicProperty { + return s.v +} diff --git a/typedapi/esdsl/dynamictemplate.go b/typedapi/esdsl/dynamictemplate.go new file mode 100644 index 0000000000..18bb3a39cf --- /dev/null +++ b/typedapi/esdsl/dynamictemplate.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype" +) + +type _dynamicTemplate struct { + v *types.DynamicTemplate +} + +func NewDynamicTemplate() *_dynamicTemplate { + return &_dynamicTemplate{v: types.NewDynamicTemplate()} +} + +// AdditionalDynamicTemplateProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_dynamicTemplate) AdditionalDynamicTemplateProperty(key string, value json.RawMessage) *_dynamicTemplate { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalDynamicTemplateProperty = tmp + return s +} + +func (s *_dynamicTemplate) Mapping(property types.PropertyVariant) *_dynamicTemplate { + + s.v.Mapping = *property.PropertyCaster() + + return s +} + +func (s *_dynamicTemplate) Match(matches ...string) *_dynamicTemplate { + + s.v.Match = make([]string, len(matches)) + s.v.Match = matches + + return s +} + +func (s *_dynamicTemplate) MatchMappingType(matchmappingtypes ...string) *_dynamicTemplate { + + s.v.MatchMappingType = make([]string, len(matchmappingtypes)) + s.v.MatchMappingType = matchmappingtypes + + return s +} + +func (s *_dynamicTemplate) MatchPattern(matchpattern matchtype.MatchType) *_dynamicTemplate { + + s.v.MatchPattern = &matchpattern + return s +} + +func (s *_dynamicTemplate) PathMatch(pathmatches ...string) *_dynamicTemplate { + + s.v.PathMatch = make([]string, len(pathmatches)) + s.v.PathMatch = pathmatches + + return s +} + +func (s *_dynamicTemplate) PathUnmatch(pathunmatches ...string) *_dynamicTemplate { + + s.v.PathUnmatch = make([]string, len(pathunmatches)) + s.v.PathUnmatch = pathunmatches + + return s +} + +func (s *_dynamicTemplate) Runtime(runtime
types.RuntimeFieldVariant) *_dynamicTemplate { + + s.v.Runtime = runtime.RuntimeFieldCaster() + + return s +} + +func (s *_dynamicTemplate) Unmatch(unmatches ...string) *_dynamicTemplate { + + s.v.Unmatch = make([]string, len(unmatches)) + s.v.Unmatch = unmatches + + return s +} + +func (s *_dynamicTemplate) UnmatchMappingType(unmatchmappingtypes ...string) *_dynamicTemplate { + + s.v.UnmatchMappingType = make([]string, len(unmatchmappingtypes)) + s.v.UnmatchMappingType = unmatchmappingtypes + + return s +} + +func (s *_dynamicTemplate) DynamicTemplateCaster() *types.DynamicTemplate { + return s.v +} diff --git a/typedapi/esdsl/edgengramtokenfilter.go b/typedapi/esdsl/edgengramtokenfilter.go new file mode 100644 index 0000000000..c81502010c --- /dev/null +++ b/typedapi/esdsl/edgengramtokenfilter.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside" +) + +type _edgeNGramTokenFilter struct { + v *types.EdgeNGramTokenFilter +} + +func NewEdgeNGramTokenFilter() *_edgeNGramTokenFilter { + + return &_edgeNGramTokenFilter{v: types.NewEdgeNGramTokenFilter()} + +} + +func (s *_edgeNGramTokenFilter) MaxGram(maxgram int) *_edgeNGramTokenFilter { + + s.v.MaxGram = &maxgram + + return s +} + +func (s *_edgeNGramTokenFilter) MinGram(mingram int) *_edgeNGramTokenFilter { + + s.v.MinGram = &mingram + + return s +} + +func (s *_edgeNGramTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_edgeNGramTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_edgeNGramTokenFilter) Side(side edgengramside.EdgeNGramSide) *_edgeNGramTokenFilter { + + s.v.Side = &side + return s +} + +func (s *_edgeNGramTokenFilter) Version(versionstring string) *_edgeNGramTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_edgeNGramTokenFilter) EdgeNGramTokenFilterCaster() *types.EdgeNGramTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/edgengramtokenizer.go b/typedapi/esdsl/edgengramtokenizer.go new file mode 100644 index 0000000000..8582d5dbb2 --- /dev/null +++ b/typedapi/esdsl/edgengramtokenizer.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar" +) + +type _edgeNGramTokenizer struct { + v *types.EdgeNGramTokenizer +} + +func NewEdgeNGramTokenizer() *_edgeNGramTokenizer { + + return &_edgeNGramTokenizer{v: types.NewEdgeNGramTokenizer()} + +} + +func (s *_edgeNGramTokenizer) CustomTokenChars(customtokenchars string) *_edgeNGramTokenizer { + + s.v.CustomTokenChars = &customtokenchars + + return s +} + +func (s *_edgeNGramTokenizer) MaxGram(maxgram int) *_edgeNGramTokenizer { + + s.v.MaxGram = &maxgram + + return s +} + +func (s *_edgeNGramTokenizer) MinGram(mingram int) *_edgeNGramTokenizer { + + s.v.MinGram = &mingram + + return s +} + +func (s *_edgeNGramTokenizer) TokenChars(tokenchars ...tokenchar.TokenChar) *_edgeNGramTokenizer { + + for _, v := range tokenchars { + + s.v.TokenChars = append(s.v.TokenChars, v) + + } + return s +} + +func (s *_edgeNGramTokenizer) Version(versionstring string) *_edgeNGramTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_edgeNGramTokenizer) EdgeNGramTokenizerCaster() *types.EdgeNGramTokenizer { + return s.v +} diff --git a/typedapi/esdsl/elisiontokenfilter.go b/typedapi/esdsl/elisiontokenfilter.go new file mode 100644 index 0000000000..6661acaab5 --- /dev/null +++ 
b/typedapi/esdsl/elisiontokenfilter.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _elisionTokenFilter struct { + v *types.ElisionTokenFilter +} + +func NewElisionTokenFilter() *_elisionTokenFilter { + + return &_elisionTokenFilter{v: types.NewElisionTokenFilter()} + +} + +func (s *_elisionTokenFilter) Articles(articles ...string) *_elisionTokenFilter { + + for _, v := range articles { + + s.v.Articles = append(s.v.Articles, v) + + } + return s +} + +func (s *_elisionTokenFilter) ArticlesCase(stringifiedboolean types.StringifiedbooleanVariant) *_elisionTokenFilter { + + s.v.ArticlesCase = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_elisionTokenFilter) ArticlesPath(articlespath string) *_elisionTokenFilter { + + s.v.ArticlesPath = &articlespath + + return s +} + +func (s *_elisionTokenFilter) Version(versionstring string) *_elisionTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s 
*_elisionTokenFilter) ElisionTokenFilterCaster() *types.ElisionTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/emailaction.go b/typedapi/esdsl/emailaction.go new file mode 100644 index 0000000000..5eb3baef83 --- /dev/null +++ b/typedapi/esdsl/emailaction.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority" +) + +type _emailAction struct { + v *types.EmailAction +} + +func NewEmailAction(subject string) *_emailAction { + + tmp := &_emailAction{v: types.NewEmailAction()} + + tmp.Subject(subject) + + return tmp + +} + +func (s *_emailAction) Attachments(attachments map[string]types.EmailAttachmentContainer) *_emailAction { + + s.v.Attachments = attachments + return s +} + +func (s *_emailAction) AddAttachment(key string, value types.EmailAttachmentContainerVariant) *_emailAction { + + var tmp map[string]types.EmailAttachmentContainer + if s.v.Attachments == nil { + s.v.Attachments = make(map[string]types.EmailAttachmentContainer) + } else { + tmp = s.v.Attachments + } + + tmp[key] = *value.EmailAttachmentContainerCaster() + + s.v.Attachments = tmp + return s +} + +func (s *_emailAction) Bcc(bccs ...string) *_emailAction { + + s.v.Bcc = make([]string, len(bccs)) + s.v.Bcc = bccs + + return s +} + +func (s *_emailAction) Body(body types.EmailBodyVariant) *_emailAction { + + s.v.Body = body.EmailBodyCaster() + + return s +} + +func (s *_emailAction) Cc(ccs ...string) *_emailAction { + + s.v.Cc = make([]string, len(ccs)) + s.v.Cc = ccs + + return s +} + +func (s *_emailAction) From(from string) *_emailAction { + + s.v.From = &from + + return s +} + +func (s *_emailAction) Id(id string) *_emailAction { + + s.v.Id = &id + + return s +} + +func (s *_emailAction) Priority(priority emailpriority.EmailPriority) *_emailAction { + + s.v.Priority = &priority + return s +} + +func (s *_emailAction) ReplyTo(replytos ...string) *_emailAction { + + s.v.ReplyTo = make([]string, len(replytos)) + s.v.ReplyTo = replytos + + return s +} + +func (s *_emailAction) SentDate(datetime types.DateTimeVariant) *_emailAction { + + 
s.v.SentDate = *datetime.DateTimeCaster() + + return s +} + +func (s *_emailAction) Subject(subject string) *_emailAction { + + s.v.Subject = subject + + return s +} + +func (s *_emailAction) To(tos ...string) *_emailAction { + + s.v.To = make([]string, len(tos)) + s.v.To = tos + + return s +} + +func (s *_emailAction) EmailActionCaster() *types.EmailAction { + return s.v +} diff --git a/typedapi/esdsl/emailattachmentcontainer.go b/typedapi/esdsl/emailattachmentcontainer.go new file mode 100644 index 0000000000..10193e0e6c --- /dev/null +++ b/typedapi/esdsl/emailattachmentcontainer.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _emailAttachmentContainer struct { + v *types.EmailAttachmentContainer +} + +func NewEmailAttachmentContainer() *_emailAttachmentContainer { + return &_emailAttachmentContainer{v: types.NewEmailAttachmentContainer()} +} + +// AdditionalEmailAttachmentContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_emailAttachmentContainer) AdditionalEmailAttachmentContainerProperty(key string, value json.RawMessage) *_emailAttachmentContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalEmailAttachmentContainerProperty = tmp + return s +} + +func (s *_emailAttachmentContainer) Data(data types.DataEmailAttachmentVariant) *_emailAttachmentContainer { + + s.v.Data = data.DataEmailAttachmentCaster() + + return s +} + +func (s *_emailAttachmentContainer) Http(http types.HttpEmailAttachmentVariant) *_emailAttachmentContainer { + + s.v.Http = http.HttpEmailAttachmentCaster() + + return s +} + +func (s *_emailAttachmentContainer) Reporting(reporting types.ReportingEmailAttachmentVariant) *_emailAttachmentContainer { + + s.v.Reporting = reporting.ReportingEmailAttachmentCaster() + + return s +} + +func (s *_emailAttachmentContainer) EmailAttachmentContainerCaster() *types.EmailAttachmentContainer { + return s.v +} diff --git a/typedapi/esdsl/emailbody.go b/typedapi/esdsl/emailbody.go new file mode 100644 index 0000000000..b417cd533d --- /dev/null +++ b/typedapi/esdsl/emailbody.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _emailBody struct { + v *types.EmailBody +} + +func NewEmailBody() *_emailBody { + + return &_emailBody{v: types.NewEmailBody()} + +} + +func (s *_emailBody) Html(html string) *_emailBody { + + s.v.Html = &html + + return s +} + +func (s *_emailBody) Text(text string) *_emailBody { + + s.v.Text = &text + + return s +} + +func (s *_emailBody) EmailBodyCaster() *types.EmailBody { + return s.v +} diff --git a/typedapi/esdsl/emptyobject.go b/typedapi/esdsl/emptyobject.go new file mode 100644 index 0000000000..d32c15a295 --- /dev/null +++ b/typedapi/esdsl/emptyobject.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _emptyObject struct { + v *types.EmptyObject +} + +func NewEmptyObject() *_emptyObject { + + return &_emptyObject{v: types.NewEmptyObject()} + +} + +func (s *_emptyObject) EmptyObjectCaster() *types.EmptyObject { + return s.v +} diff --git a/typedapi/esdsl/englishanalyzer.go b/typedapi/esdsl/englishanalyzer.go new file mode 100644 index 0000000000..2d5f0e311a --- /dev/null +++ b/typedapi/esdsl/englishanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _englishAnalyzer struct { + v *types.EnglishAnalyzer +} + +func NewEnglishAnalyzer() *_englishAnalyzer { + + return &_englishAnalyzer{v: types.NewEnglishAnalyzer()} + +} + +func (s *_englishAnalyzer) StemExclusion(stemexclusions ...string) *_englishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_englishAnalyzer) Stopwords(stopwords ...string) *_englishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_englishAnalyzer) StopwordsPath(stopwordspath string) *_englishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_englishAnalyzer) EnglishAnalyzerCaster() *types.EnglishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/enrichpolicy.go b/typedapi/esdsl/enrichpolicy.go new file mode 100644 index 0000000000..bbcd2cc19e --- /dev/null +++ b/typedapi/esdsl/enrichpolicy.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _enrichPolicy struct { + v *types.EnrichPolicy +} + +func NewEnrichPolicy() *_enrichPolicy { + + return &_enrichPolicy{v: types.NewEnrichPolicy()} + +} + +func (s *_enrichPolicy) ElasticsearchVersion(elasticsearchversion string) *_enrichPolicy { + + s.v.ElasticsearchVersion = &elasticsearchversion + + return s +} + +func (s *_enrichPolicy) EnrichFields(fields ...string) *_enrichPolicy { + + s.v.EnrichFields = fields + + return s +} + +func (s *_enrichPolicy) Indices(indices ...string) *_enrichPolicy { + + s.v.Indices = indices + + return s +} + +func (s *_enrichPolicy) MatchField(field string) *_enrichPolicy { + + s.v.MatchField = field + + return s +} + +func (s *_enrichPolicy) Name(name string) *_enrichPolicy { + + s.v.Name = &name + + return s +} + +func (s *_enrichPolicy) Query(query types.QueryVariant) *_enrichPolicy { + + s.v.Query = query.QueryCaster() + + return s +} + +func (s *_enrichPolicy) EnrichPolicyCaster() *types.EnrichPolicy { + return s.v +} diff --git a/typedapi/esdsl/enrichprocessor.go b/typedapi/esdsl/enrichprocessor.go new file mode 100644 index 0000000000..4481df5f0d --- /dev/null +++ b/typedapi/esdsl/enrichprocessor.go @@ -0,0 +1,166 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" +) + +type _enrichProcessor struct { + v *types.EnrichProcessor +} + +// The `enrich` processor can enrich documents with data from another index. +func NewEnrichProcessor(policyname string) *_enrichProcessor { + + tmp := &_enrichProcessor{v: types.NewEnrichProcessor()} + + tmp.PolicyName(policyname) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_enrichProcessor) Description(description string) *_enrichProcessor { + + s.v.Description = &description + + return s +} + +// The field in the input document that matches the policies match_field used to +// retrieve the enrichment data. +// Supports template snippets. +func (s *_enrichProcessor) Field(field string) *_enrichProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_enrichProcessor) If(if_ types.ScriptVariant) *_enrichProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_enrichProcessor) IgnoreFailure(ignorefailure bool) *_enrichProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_enrichProcessor) IgnoreMissing(ignoremissing bool) *_enrichProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// The maximum number of matched documents to include under the configured +// target field. +// The `target_field` will be turned into a json array if `max_matches` is +// higher than 1, otherwise `target_field` will become a json object. +// In order to avoid documents getting too large, the maximum allowed value is +// 128. +func (s *_enrichProcessor) MaxMatches(maxmatches int) *_enrichProcessor { + + s.v.MaxMatches = &maxmatches + + return s +} + +// Handle failures for the processor. +func (s *_enrichProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_enrichProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// If processor will update fields with pre-existing non-null-valued field. +// When set to `false`, such fields will not be touched. +func (s *_enrichProcessor) Override(override bool) *_enrichProcessor { + + s.v.Override = &override + + return s +} + +// The name of the enrich policy to use. +func (s *_enrichProcessor) PolicyName(policyname string) *_enrichProcessor { + + s.v.PolicyName = policyname + + return s +} + +// A spatial relation operator used to match the geoshape of incoming documents +// to documents in the enrich index. +// This option is only used for `geo_match` enrich policy types. +func (s *_enrichProcessor) ShapeRelation(shaperelation geoshaperelation.GeoShapeRelation) *_enrichProcessor { + + s.v.ShapeRelation = &shaperelation + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_enrichProcessor) Tag(tag string) *_enrichProcessor { + + s.v.Tag = &tag + + return s +} + +// Field added to incoming documents to contain enrich data. This field contains +// both the `match_field` and `enrich_fields` specified in the enrich policy. +// Supports template snippets. +func (s *_enrichProcessor) TargetField(field string) *_enrichProcessor { + + s.v.TargetField = field + + return s +} + +func (s *_enrichProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Enrich = s.v + + return container +} + +func (s *_enrichProcessor) EnrichProcessorCaster() *types.EnrichProcessor { + return s.v +} diff --git a/typedapi/esdsl/ensemble.go b/typedapi/esdsl/ensemble.go new file mode 100644 index 0000000000..c4a1fc4134 --- /dev/null +++ b/typedapi/esdsl/ensemble.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ensemble struct { + v *types.Ensemble +} + +func NewEnsemble() *_ensemble { + + return &_ensemble{v: types.NewEnsemble()} + +} + +func (s *_ensemble) AggregateOutput(aggregateoutput types.AggregateOutputVariant) *_ensemble { + + s.v.AggregateOutput = aggregateoutput.AggregateOutputCaster() + + return s +} + +func (s *_ensemble) ClassificationLabels(classificationlabels ...string) *_ensemble { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +func (s *_ensemble) FeatureNames(featurenames ...string) *_ensemble { + + for _, v := range featurenames { + + s.v.FeatureNames = append(s.v.FeatureNames, v) + + } + return s +} + +func (s *_ensemble) TargetType(targettype string) *_ensemble { + + s.v.TargetType = &targettype + + return s +} + +func (s *_ensemble) TrainedModels(trainedmodels ...types.TrainedModelVariant) *_ensemble { + + for _, v := range trainedmodels { + + s.v.TrainedModels = append(s.v.TrainedModels, *v.TrainedModelCaster()) + + } + return s +} + +func (s *_ensemble) EnsembleCaster() *types.Ensemble { + return s.v +} diff --git a/typedapi/esdsl/epochtimeunitmillis.go b/typedapi/esdsl/epochtimeunitmillis.go new file mode 100644 index 0000000000..fbfac5350c --- /dev/null +++ b/typedapi/esdsl/epochtimeunitmillis.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _epochTimeUnitMillis struct { + v types.EpochTimeUnitMillis +} + +func NewEpochTimeUnitMillis(epochtimeunitmillis int64) *_epochTimeUnitMillis { + + return &_epochTimeUnitMillis{v: types.EpochTimeUnitMillis(epochtimeunitmillis)} + +} + +func (u *_epochTimeUnitMillis) EpochTimeUnitMillisCaster() *types.EpochTimeUnitMillis { + return &u.v +} diff --git a/typedapi/esdsl/epochtimeunitseconds.go b/typedapi/esdsl/epochtimeunitseconds.go new file mode 100644 index 0000000000..31b58cce91 --- /dev/null +++ b/typedapi/esdsl/epochtimeunitseconds.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _epochTimeUnitSeconds struct { + v types.EpochTimeUnitSeconds +} + +func NewEpochTimeUnitSeconds(epochtimeunitseconds int64) *_epochTimeUnitSeconds { + + return &_epochTimeUnitSeconds{v: types.EpochTimeUnitSeconds(epochtimeunitseconds)} + +} + +func (u *_epochTimeUnitSeconds) EpochTimeUnitSecondsCaster() *types.EpochTimeUnitSeconds { + return &u.v +} diff --git a/typedapi/esdsl/estoniananalyzer.go b/typedapi/esdsl/estoniananalyzer.go new file mode 100644 index 0000000000..bcec75f992 --- /dev/null +++ b/typedapi/esdsl/estoniananalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _estonianAnalyzer struct { + v *types.EstonianAnalyzer +} + +func NewEstonianAnalyzer() *_estonianAnalyzer { + + return &_estonianAnalyzer{v: types.NewEstonianAnalyzer()} + +} + +func (s *_estonianAnalyzer) Stopwords(stopwords ...string) *_estonianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_estonianAnalyzer) StopwordsPath(stopwordspath string) *_estonianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_estonianAnalyzer) EstonianAnalyzerCaster() *types.EstonianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/ewmamodelsettings.go b/typedapi/esdsl/ewmamodelsettings.go new file mode 100644 index 0000000000..2347ffea84 --- /dev/null +++ b/typedapi/esdsl/ewmamodelsettings.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ewmaModelSettings struct { + v *types.EwmaModelSettings +} + +func NewEwmaModelSettings() *_ewmaModelSettings { + + return &_ewmaModelSettings{v: types.NewEwmaModelSettings()} + +} + +func (s *_ewmaModelSettings) Alpha(alpha float32) *_ewmaModelSettings { + + s.v.Alpha = &alpha + + return s +} + +func (s *_ewmaModelSettings) EwmaModelSettingsCaster() *types.EwmaModelSettings { + return s.v +} diff --git a/typedapi/esdsl/ewmamovingaverageaggregation.go b/typedapi/esdsl/ewmamovingaverageaggregation.go new file mode 100644 index 0000000000..a105c920ad --- /dev/null +++ b/typedapi/esdsl/ewmamovingaverageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _ewmaMovingAverageAggregation struct { + v *types.EwmaMovingAverageAggregation +} + +func NewEwmaMovingAverageAggregation(settings types.EwmaModelSettingsVariant) *_ewmaMovingAverageAggregation { + + tmp := &_ewmaMovingAverageAggregation{v: types.NewEwmaMovingAverageAggregation()} + + tmp.Settings(settings) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_ewmaMovingAverageAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_ewmaMovingAverageAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_ewmaMovingAverageAggregation) Format(format string) *_ewmaMovingAverageAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_ewmaMovingAverageAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_ewmaMovingAverageAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_ewmaMovingAverageAggregation) Minimize(minimize bool) *_ewmaMovingAverageAggregation { + + s.v.Minimize = &minimize + + return s +} + +func (s *_ewmaMovingAverageAggregation) Predict(predict int) *_ewmaMovingAverageAggregation { + + s.v.Predict = &predict + + return s +} + +func (s *_ewmaMovingAverageAggregation) Settings(settings types.EwmaModelSettingsVariant) *_ewmaMovingAverageAggregation { + + s.v.Settings = *settings.EwmaModelSettingsCaster() + + return s +} + +func (s *_ewmaMovingAverageAggregation) Window(window int) *_ewmaMovingAverageAggregation { + + s.v.Window = &window + + return s +} + +func (s *_ewmaMovingAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingAvg = s.v + + return container +} + +func (s *_ewmaMovingAverageAggregation) EwmaMovingAverageAggregationCaster() *types.EwmaMovingAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/executionstate.go b/typedapi/esdsl/executionstate.go new file mode 100644 index 0000000000..9879725e31 --- /dev/null +++ b/typedapi/esdsl/executionstate.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _executionState struct { + v *types.ExecutionState +} + +func NewExecutionState(successful bool) *_executionState { + + tmp := &_executionState{v: types.NewExecutionState()} + + tmp.Successful(successful) + + return tmp + +} + +func (s *_executionState) Reason(reason string) *_executionState { + + s.v.Reason = &reason + + return s +} + +func (s *_executionState) Successful(successful bool) *_executionState { + + s.v.Successful = successful + + return s +} + +func (s *_executionState) Timestamp(datetime types.DateTimeVariant) *_executionState { + + s.v.Timestamp = *datetime.DateTimeCaster() + + return s +} + +func (s *_executionState) ExecutionStateCaster() *types.ExecutionState { + return s.v +} diff --git a/typedapi/esdsl/existsquery.go b/typedapi/esdsl/existsquery.go new file mode 100644 index 0000000000..bd0b08b68e --- /dev/null +++ b/typedapi/esdsl/existsquery.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _existsQuery struct { + v *types.ExistsQuery +} + +// Returns documents that contain an indexed value for a field. +func NewExistsQuery() *_existsQuery { + + return &_existsQuery{v: types.NewExistsQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_existsQuery) Boost(boost float32) *_existsQuery { + + s.v.Boost = &boost + + return s +} + +// Name of the field you wish to search. 
+func (s *_existsQuery) Field(field string) *_existsQuery { + + s.v.Field = field + + return s +} + +func (s *_existsQuery) QueryName_(queryname_ string) *_existsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_existsQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Exists = s.v + + return container +} + +func (s *_existsQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.Exists = s.v + + return container +} + +func (s *_existsQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.Exists = s.v + + return container +} + +func (s *_existsQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.Exists = s.v + + return container +} + +func (s *_existsQuery) ExistsQueryCaster() *types.ExistsQuery { + return s.v +} diff --git a/typedapi/esdsl/expandwildcards.go b/typedapi/esdsl/expandwildcards.go new file mode 100644 index 0000000000..8096d52cee --- /dev/null +++ b/typedapi/esdsl/expandwildcards.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" +) + +// This is provide an API for type alias. +type _expandWildcards struct { + v types.ExpandWildcards +} + +func NewExpandWildcards() *_expandWildcards { + return &_expandWildcards{v: []expandwildcard.ExpandWildcard{}} +} + +func (u *_expandWildcards) ExpandWildcardsCaster() *types.ExpandWildcards { + return &u.v +} diff --git a/typedapi/esdsl/explorecontrols.go b/typedapi/esdsl/explorecontrols.go new file mode 100644 index 0000000000..445fa9e5cd --- /dev/null +++ b/typedapi/esdsl/explorecontrols.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _exploreControls struct { + v *types.ExploreControls +} + +func NewExploreControls(usesignificance bool) *_exploreControls { + + tmp := &_exploreControls{v: types.NewExploreControls()} + + tmp.UseSignificance(usesignificance) + + return tmp + +} + +// To avoid the top-matching documents sample being dominated by a single source +// of results, it is sometimes necessary to request diversity in the sample. +// You can do this by selecting a single-value field and setting a maximum +// number of documents per value for that field. +func (s *_exploreControls) SampleDiversity(samplediversity types.SampleDiversityVariant) *_exploreControls { + + s.v.SampleDiversity = samplediversity.SampleDiversityCaster() + + return s +} + +// Each hop considers a sample of the best-matching documents on each shard. +// Using samples improves the speed of execution and keeps exploration focused +// on meaningfully-connected terms. +// Very small values (less than 50) might not provide sufficient +// weight-of-evidence to identify significant connections between terms. +// Very large sample sizes can dilute the quality of the results and increase +// execution times. +func (s *_exploreControls) SampleSize(samplesize int) *_exploreControls { + + s.v.SampleSize = &samplesize + + return s +} + +// The length of time in milliseconds after which exploration will be halted and +// the results gathered so far are returned. +// This timeout is honored on a best-effort basis. +// Execution might overrun this timeout if, for example, a long pause is +// encountered while FieldData is loaded for a field. 
+func (s *_exploreControls) Timeout(duration types.DurationVariant) *_exploreControls { + + s.v.Timeout = *duration.DurationCaster() + + return s +} + +// Filters associated terms so only those that are significantly associated with +// your query are included. +func (s *_exploreControls) UseSignificance(usesignificance bool) *_exploreControls { + + s.v.UseSignificance = usesignificance + + return s +} + +func (s *_exploreControls) ExploreControlsCaster() *types.ExploreControls { + return s.v +} diff --git a/typedapi/esdsl/extendedboundsdouble.go b/typedapi/esdsl/extendedboundsdouble.go new file mode 100644 index 0000000000..861b74bbab --- /dev/null +++ b/typedapi/esdsl/extendedboundsdouble.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _extendedBoundsdouble struct { + v *types.ExtendedBoundsdouble +} + +func NewExtendedBoundsdouble() *_extendedBoundsdouble { + + return &_extendedBoundsdouble{v: types.NewExtendedBoundsdouble()} + +} + +// Maximum value for the bound. +func (s *_extendedBoundsdouble) Max(max types.Float64) *_extendedBoundsdouble { + + s.v.Max = &max + + return s +} + +// Minimum value for the bound. +func (s *_extendedBoundsdouble) Min(min types.Float64) *_extendedBoundsdouble { + + s.v.Min = &min + + return s +} + +func (s *_extendedBoundsdouble) ExtendedBoundsdoubleCaster() *types.ExtendedBoundsdouble { + return s.v +} diff --git a/typedapi/esdsl/extendedboundsfielddatemath.go b/typedapi/esdsl/extendedboundsfielddatemath.go new file mode 100644 index 0000000000..28a89572ae --- /dev/null +++ b/typedapi/esdsl/extendedboundsfielddatemath.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _extendedBoundsFieldDateMath struct { + v *types.ExtendedBoundsFieldDateMath +} + +func NewExtendedBoundsFieldDateMath() *_extendedBoundsFieldDateMath { + + return &_extendedBoundsFieldDateMath{v: types.NewExtendedBoundsFieldDateMath()} + +} + +// Maximum value for the bound. +func (s *_extendedBoundsFieldDateMath) Max(fielddatemath types.FieldDateMathVariant) *_extendedBoundsFieldDateMath { + + s.v.Max = *fielddatemath.FieldDateMathCaster() + + return s +} + +// Minimum value for the bound. +func (s *_extendedBoundsFieldDateMath) Min(fielddatemath types.FieldDateMathVariant) *_extendedBoundsFieldDateMath { + + s.v.Min = *fielddatemath.FieldDateMathCaster() + + return s +} + +func (s *_extendedBoundsFieldDateMath) ExtendedBoundsFieldDateMathCaster() *types.ExtendedBoundsFieldDateMath { + return s.v +} diff --git a/typedapi/esdsl/extendedstatsaggregation.go b/typedapi/esdsl/extendedstatsaggregation.go new file mode 100644 index 0000000000..de3e1dea16 --- /dev/null +++ b/typedapi/esdsl/extendedstatsaggregation.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _extendedStatsAggregation struct { + v *types.ExtendedStatsAggregation +} + +// A multi-value metrics aggregation that computes stats over numeric values +// extracted from the aggregated documents. +func NewExtendedStatsAggregation() *_extendedStatsAggregation { + + return &_extendedStatsAggregation{v: types.NewExtendedStatsAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_extendedStatsAggregation) Field(field string) *_extendedStatsAggregation { + + s.v.Field = &field + + return s +} + +func (s *_extendedStatsAggregation) Format(format string) *_extendedStatsAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_extendedStatsAggregation) Missing(missing types.MissingVariant) *_extendedStatsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_extendedStatsAggregation) Script(script types.ScriptVariant) *_extendedStatsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// The number of standard deviations above/below the mean to display. 
+func (s *_extendedStatsAggregation) Sigma(sigma types.Float64) *_extendedStatsAggregation { + + s.v.Sigma = &sigma + + return s +} + +func (s *_extendedStatsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.ExtendedStats = s.v + + return container +} + +func (s *_extendedStatsAggregation) ExtendedStatsAggregationCaster() *types.ExtendedStatsAggregation { + return s.v +} diff --git a/typedapi/esdsl/extendedstatsbucketaggregation.go b/typedapi/esdsl/extendedstatsbucketaggregation.go new file mode 100644 index 0000000000..30ee3511d9 --- /dev/null +++ b/typedapi/esdsl/extendedstatsbucketaggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _extendedStatsBucketAggregation struct { + v *types.ExtendedStatsBucketAggregation +} + +// A sibling pipeline aggregation which calculates a variety of stats across all +// bucket of a specified metric in a sibling aggregation. +func NewExtendedStatsBucketAggregation() *_extendedStatsBucketAggregation { + + return &_extendedStatsBucketAggregation{v: types.NewExtendedStatsBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_extendedStatsBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_extendedStatsBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_extendedStatsBucketAggregation) Format(format string) *_extendedStatsBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_extendedStatsBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_extendedStatsBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The number of standard deviations above/below the mean to display. 
+func (s *_extendedStatsBucketAggregation) Sigma(sigma types.Float64) *_extendedStatsBucketAggregation { + + s.v.Sigma = &sigma + + return s +} + +func (s *_extendedStatsBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.ExtendedStatsBucket = s.v + + return container +} + +func (s *_extendedStatsBucketAggregation) ExtendedStatsBucketAggregationCaster() *types.ExtendedStatsBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/failprocessor.go b/typedapi/esdsl/failprocessor.go new file mode 100644 index 0000000000..0aea86da1f --- /dev/null +++ b/typedapi/esdsl/failprocessor.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _failProcessor struct { + v *types.FailProcessor +} + +// Raises an exception. +// This is useful for when you expect a pipeline to fail and want to relay a +// specific message to the requester. 
+func NewFailProcessor(message string) *_failProcessor { + + tmp := &_failProcessor{v: types.NewFailProcessor()} + + tmp.Message(message) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_failProcessor) Description(description string) *_failProcessor { + + s.v.Description = &description + + return s +} + +// Conditionally execute the processor. +func (s *_failProcessor) If(if_ types.ScriptVariant) *_failProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_failProcessor) IgnoreFailure(ignorefailure bool) *_failProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// The error message thrown by the processor. +// Supports template snippets. +func (s *_failProcessor) Message(message string) *_failProcessor { + + s.v.Message = message + + return s +} + +// Handle failures for the processor. +func (s *_failProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_failProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_failProcessor) Tag(tag string) *_failProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_failProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Fail = s.v + + return container +} + +func (s *_failProcessor) FailProcessorCaster() *types.FailProcessor { + return s.v +} diff --git a/typedapi/esdsl/featureenabled.go b/typedapi/esdsl/featureenabled.go new file mode 100644 index 0000000000..97494213b6 --- /dev/null +++ b/typedapi/esdsl/featureenabled.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _featureEnabled struct { + v *types.FeatureEnabled +} + +func NewFeatureEnabled(enabled bool) *_featureEnabled { + + tmp := &_featureEnabled{v: types.NewFeatureEnabled()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_featureEnabled) Enabled(enabled bool) *_featureEnabled { + + s.v.Enabled = enabled + + return s +} + +func (s *_featureEnabled) FeatureEnabledCaster() *types.FeatureEnabled { + return s.v +} diff --git a/typedapi/esdsl/fieldaliasproperty.go b/typedapi/esdsl/fieldaliasproperty.go new file mode 100644 index 0000000000..411616d806 --- /dev/null +++ b/typedapi/esdsl/fieldaliasproperty.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _fieldAliasProperty struct { + v *types.FieldAliasProperty +} + +func NewFieldAliasProperty() *_fieldAliasProperty { + + return &_fieldAliasProperty{v: types.NewFieldAliasProperty()} + +} + +func (s *_fieldAliasProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_fieldAliasProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_fieldAliasProperty) Fields(fields map[string]types.Property) *_fieldAliasProperty { + + s.v.Fields = fields + return s +} + +func (s *_fieldAliasProperty) AddField(key string, value types.PropertyVariant) *_fieldAliasProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_fieldAliasProperty) IgnoreAbove(ignoreabove int) *_fieldAliasProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_fieldAliasProperty) Meta(meta map[string]string) *_fieldAliasProperty { + + s.v.Meta = meta + return s +} + +func (s *_fieldAliasProperty) AddMeta(key string, value string) *_fieldAliasProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_fieldAliasProperty) Path(field string) *_fieldAliasProperty { + + s.v.Path = &field + + return s +} + +func (s *_fieldAliasProperty) Properties(properties map[string]types.Property) *_fieldAliasProperty { + + s.v.Properties = properties + return s +} + +func (s *_fieldAliasProperty) AddProperty(key string, value types.PropertyVariant) *_fieldAliasProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_fieldAliasProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_fieldAliasProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_fieldAliasProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_fieldAliasProperty) FieldAliasPropertyCaster() *types.FieldAliasProperty { + return s.v +} diff --git a/typedapi/esdsl/fieldandformat.go b/typedapi/esdsl/fieldandformat.go new file mode 100644 index 0000000000..656848558d --- /dev/null +++ b/typedapi/esdsl/fieldandformat.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fieldAndFormat struct { + v *types.FieldAndFormat +} + +func NewFieldAndFormat() *_fieldAndFormat { + + return &_fieldAndFormat{v: types.NewFieldAndFormat()} + +} + +// A wildcard pattern. The request returns values for field names matching this +// pattern. +func (s *_fieldAndFormat) Field(field string) *_fieldAndFormat { + + s.v.Field = field + + return s +} + +// The format in which the values are returned. +func (s *_fieldAndFormat) Format(format string) *_fieldAndFormat { + + s.v.Format = &format + + return s +} + +func (s *_fieldAndFormat) IncludeUnmapped(includeunmapped bool) *_fieldAndFormat { + + s.v.IncludeUnmapped = &includeunmapped + + return s +} + +func (s *_fieldAndFormat) FieldAndFormatCaster() *types.FieldAndFormat { + return s.v +} diff --git a/typedapi/esdsl/fieldcollapse.go b/typedapi/esdsl/fieldcollapse.go new file mode 100644 index 0000000000..1c91a48710 --- /dev/null +++ b/typedapi/esdsl/fieldcollapse.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fieldCollapse struct { + v *types.FieldCollapse +} + +func NewFieldCollapse() *_fieldCollapse { + + return &_fieldCollapse{v: types.NewFieldCollapse()} + +} + +func (s *_fieldCollapse) Collapse(collapse types.FieldCollapseVariant) *_fieldCollapse { + + s.v.Collapse = collapse.FieldCollapseCaster() + + return s +} + +// The field to collapse the result set on +func (s *_fieldCollapse) Field(field string) *_fieldCollapse { + + s.v.Field = field + + return s +} + +// The number of inner hits and their sort order +func (s *_fieldCollapse) InnerHits(innerhits ...types.InnerHitsVariant) *_fieldCollapse { + + s.v.InnerHits = make([]types.InnerHits, len(innerhits)) + for i, v := range innerhits { + s.v.InnerHits[i] = *v.InnerHitsCaster() + } + + return s +} + +// The number of concurrent requests allowed to retrieve the inner_hits per +// group +func (s *_fieldCollapse) MaxConcurrentGroupSearches(maxconcurrentgroupsearches int) *_fieldCollapse { + + s.v.MaxConcurrentGroupSearches = &maxconcurrentgroupsearches 
+ + return s +} + +func (s *_fieldCollapse) FieldCollapseCaster() *types.FieldCollapse { + return s.v +} diff --git a/typedapi/esdsl/fielddatafrequencyfilter.go b/typedapi/esdsl/fielddatafrequencyfilter.go new file mode 100644 index 0000000000..036cf570b4 --- /dev/null +++ b/typedapi/esdsl/fielddatafrequencyfilter.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fielddataFrequencyFilter struct { + v *types.FielddataFrequencyFilter +} + +func NewFielddataFrequencyFilter(max types.Float64, min types.Float64, minsegmentsize int) *_fielddataFrequencyFilter { + + tmp := &_fielddataFrequencyFilter{v: types.NewFielddataFrequencyFilter()} + + tmp.Max(max) + + tmp.Min(min) + + tmp.MinSegmentSize(minsegmentsize) + + return tmp + +} + +func (s *_fielddataFrequencyFilter) Max(max types.Float64) *_fielddataFrequencyFilter { + + s.v.Max = max + + return s +} + +func (s *_fielddataFrequencyFilter) Min(min types.Float64) *_fielddataFrequencyFilter { + + s.v.Min = min + + return s +} + +func (s *_fielddataFrequencyFilter) MinSegmentSize(minsegmentsize int) *_fielddataFrequencyFilter { + + s.v.MinSegmentSize = minsegmentsize + + return s +} + +func (s *_fielddataFrequencyFilter) FielddataFrequencyFilterCaster() *types.FielddataFrequencyFilter { + return s.v +} diff --git a/typedapi/esdsl/fielddatemath.go b/typedapi/esdsl/fielddatemath.go new file mode 100644 index 0000000000..1f8755a8ea --- /dev/null +++ b/typedapi/esdsl/fielddatemath.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _fieldDateMath struct { + v types.FieldDateMath +} + +func NewFieldDateMath() *_fieldDateMath { + return &_fieldDateMath{v: nil} +} + +func (u *_fieldDateMath) DateMath(datemath string) *_fieldDateMath { + + u.v = &datemath + + return u +} + +func (u *_fieldDateMath) Float64(float64 types.Float64) *_fieldDateMath { + + u.v = &float64 + + return u +} + +func (u *_fieldDateMath) FieldDateMathCaster() *types.FieldDateMath { + return &u.v +} diff --git a/typedapi/esdsl/fieldlookup.go b/typedapi/esdsl/fieldlookup.go new file mode 100644 index 0000000000..5560b095f0 --- /dev/null +++ b/typedapi/esdsl/fieldlookup.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fieldLookup struct { + v *types.FieldLookup +} + +func NewFieldLookup() *_fieldLookup { + + return &_fieldLookup{v: types.NewFieldLookup()} + +} + +// `id` of the document. +func (s *_fieldLookup) Id(id string) *_fieldLookup { + + s.v.Id = id + + return s +} + +// Index from which to retrieve the document. +func (s *_fieldLookup) Index(indexname string) *_fieldLookup { + + s.v.Index = &indexname + + return s +} + +// Name of the field. +func (s *_fieldLookup) Path(field string) *_fieldLookup { + + s.v.Path = &field + + return s +} + +// Custom routing value. +func (s *_fieldLookup) Routing(routing string) *_fieldLookup { + + s.v.Routing = &routing + + return s +} + +func (s *_fieldLookup) FieldLookupCaster() *types.FieldLookup { + return s.v +} diff --git a/typedapi/esdsl/fieldmetric.go b/typedapi/esdsl/fieldmetric.go new file mode 100644 index 0000000000..5f442fb5ff --- /dev/null +++ b/typedapi/esdsl/fieldmetric.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric" +) + +type _fieldMetric struct { + v *types.FieldMetric +} + +func NewFieldMetric() *_fieldMetric { + + return &_fieldMetric{v: types.NewFieldMetric()} + +} + +// The field to collect metrics for. This must be a numeric of some kind. +func (s *_fieldMetric) Field(field string) *_fieldMetric { + + s.v.Field = field + + return s +} + +// An array of metrics to collect for the field. At least one metric must be +// configured. +func (s *_fieldMetric) Metrics(metrics ...metric.Metric) *_fieldMetric { + + for _, v := range metrics { + + s.v.Metrics = append(s.v.Metrics, v) + + } + return s +} + +func (s *_fieldMetric) FieldMetricCaster() *types.FieldMetric { + return s.v +} diff --git a/typedapi/esdsl/fieldnamesfield.go b/typedapi/esdsl/fieldnamesfield.go new file mode 100644 index 0000000000..4e03f8366e --- /dev/null +++ b/typedapi/esdsl/fieldnamesfield.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fieldNamesField struct { + v *types.FieldNamesField +} + +func NewFieldNamesField(enabled bool) *_fieldNamesField { + + tmp := &_fieldNamesField{v: types.NewFieldNamesField()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_fieldNamesField) Enabled(enabled bool) *_fieldNamesField { + + s.v.Enabled = enabled + + return s +} + +func (s *_fieldNamesField) FieldNamesFieldCaster() *types.FieldNamesField { + return s.v +} diff --git a/typedapi/esdsl/fields.go b/typedapi/esdsl/fields.go new file mode 100644 index 0000000000..053b9598c6 --- /dev/null +++ b/typedapi/esdsl/fields.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _fields struct { + v types.Fields +} + +func NewFields() *_fields { + return &_fields{v: []string{}} +} + +func (u *_fields) FieldsCaster() *types.Fields { + return &u.v +} diff --git a/typedapi/esdsl/fieldsecurity.go b/typedapi/esdsl/fieldsecurity.go new file mode 100644 index 0000000000..bd278d97e3 --- /dev/null +++ b/typedapi/esdsl/fieldsecurity.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fieldSecurity struct { + v *types.FieldSecurity +} + +func NewFieldSecurity() *_fieldSecurity { + + return &_fieldSecurity{v: types.NewFieldSecurity()} + +} + +func (s *_fieldSecurity) Except(fields ...string) *_fieldSecurity { + + s.v.Except = fields + + return s +} + +func (s *_fieldSecurity) Grant(fields ...string) *_fieldSecurity { + + s.v.Grant = fields + + return s +} + +func (s *_fieldSecurity) FieldSecurityCaster() *types.FieldSecurity { + return s.v +} diff --git a/typedapi/esdsl/fieldsort.go b/typedapi/esdsl/fieldsort.go new file mode 100644 index 0000000000..a91a723f06 --- /dev/null +++ b/typedapi/esdsl/fieldsort.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _fieldSort struct { + v *types.FieldSort +} + +func NewFieldSort(order sortorder.SortOrder) *_fieldSort { + + tmp := &_fieldSort{v: types.NewFieldSort()} + + tmp.Order(order) + + return tmp + +} + +func (s *_fieldSort) Format(format string) *_fieldSort { + + s.v.Format = &format + + return s +} + +func (s *_fieldSort) Missing(missing types.MissingVariant) *_fieldSort { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_fieldSort) Mode(mode sortmode.SortMode) *_fieldSort { + + s.v.Mode = &mode + return s +} + +func (s *_fieldSort) Nested(nested types.NestedSortValueVariant) *_fieldSort { + + s.v.Nested = nested.NestedSortValueCaster() + + return s +} + +func (s *_fieldSort) NumericType(numerictype fieldsortnumerictype.FieldSortNumericType) *_fieldSort { + + s.v.NumericType = &numerictype + return s +} + +func (s *_fieldSort) Order(order sortorder.SortOrder) *_fieldSort { + + s.v.Order = &order + return s +} + +func (s *_fieldSort) UnmappedType(unmappedtype fieldtype.FieldType) *_fieldSort { + + s.v.UnmappedType = &unmappedtype + return s +} + +func (s *_fieldSort) FieldSortCaster() *types.FieldSort { + return s.v +} diff --git a/typedapi/esdsl/fieldsuggester.go b/typedapi/esdsl/fieldsuggester.go new file mode 100644 index 0000000000..b83993ca6c --- /dev/null +++ b/typedapi/esdsl/fieldsuggester.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _fieldSuggester struct { + v *types.FieldSuggester +} + +func NewFieldSuggester() *_fieldSuggester { + return &_fieldSuggester{v: types.NewFieldSuggester()} +} + +// AdditionalFieldSuggesterProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_fieldSuggester) AdditionalFieldSuggesterProperty(key string, value json.RawMessage) *_fieldSuggester { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalFieldSuggesterProperty = tmp + return s +} + +// Provides auto-complete/search-as-you-type functionality. +func (s *_fieldSuggester) Completion(completion types.CompletionSuggesterVariant) *_fieldSuggester { + + s.v.Completion = completion.CompletionSuggesterCaster() + + return s +} + +// Provides access to word alternatives on a per token basis within a certain +// string distance. 
+func (s *_fieldSuggester) Phrase(phrase types.PhraseSuggesterVariant) *_fieldSuggester { + + s.v.Phrase = phrase.PhraseSuggesterCaster() + + return s +} + +// Prefix used to search for suggestions. +func (s *_fieldSuggester) Prefix(prefix string) *_fieldSuggester { + + s.v.Prefix = &prefix + + return s +} + +// A prefix expressed as a regular expression. +func (s *_fieldSuggester) Regex(regex string) *_fieldSuggester { + + s.v.Regex = ®ex + + return s +} + +// Suggests terms based on edit distance. +func (s *_fieldSuggester) Term(term types.TermSuggesterVariant) *_fieldSuggester { + + s.v.Term = term.TermSuggesterCaster() + + return s +} + +// The text to use as input for the suggester. +// Needs to be set globally or per suggestion. +func (s *_fieldSuggester) Text(text string) *_fieldSuggester { + + s.v.Text = &text + + return s +} + +func (s *_fieldSuggester) FieldSuggesterCaster() *types.FieldSuggester { + return s.v +} diff --git a/typedapi/esdsl/fieldvalue.go b/typedapi/esdsl/fieldvalue.go new file mode 100644 index 0000000000..d73043b601 --- /dev/null +++ b/typedapi/esdsl/fieldvalue.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _fieldValue struct { + v types.FieldValue +} + +func NewFieldValue() *_fieldValue { + return &_fieldValue{v: nil} +} + +func (u *_fieldValue) Int64(int64 int64) *_fieldValue { + + u.v = &int64 + + return u +} + +func (u *_fieldValue) Float64(float64 types.Float64) *_fieldValue { + + u.v = &float64 + + return u +} + +func (u *_fieldValue) String(string string) *_fieldValue { + + u.v = &string + + return u +} + +func (u *_fieldValue) Bool(bool bool) *_fieldValue { + + u.v = &bool + + return u +} + +func (u *_fieldValue) Nil() *_fieldValue { + u.v = types.NullValue{} + return u +} + +func (u *_fieldValue) FieldValueCaster() *types.FieldValue { + return &u.v +} diff --git a/typedapi/esdsl/fieldvaluefactorscorefunction.go b/typedapi/esdsl/fieldvaluefactorscorefunction.go new file mode 100644 index 0000000000..5341a7499a --- /dev/null +++ b/typedapi/esdsl/fieldvaluefactorscorefunction.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier" +) + +type _fieldValueFactorScoreFunction struct { + v *types.FieldValueFactorScoreFunction +} + +// Function allows you to use a field from a document to influence the score. +// It’s similar to using the script_score function, however, it avoids the +// overhead of scripting. +func NewFieldValueFactorScoreFunction() *_fieldValueFactorScoreFunction { + + return &_fieldValueFactorScoreFunction{v: types.NewFieldValueFactorScoreFunction()} + +} + +// Optional factor to multiply the field value with. +func (s *_fieldValueFactorScoreFunction) Factor(factor types.Float64) *_fieldValueFactorScoreFunction { + + s.v.Factor = &factor + + return s +} + +// Field to be extracted from the document. +func (s *_fieldValueFactorScoreFunction) Field(field string) *_fieldValueFactorScoreFunction { + + s.v.Field = field + + return s +} + +// Value used if the document doesn’t have that field. +// The modifier and factor are still applied to it as though it were read from +// the document. +func (s *_fieldValueFactorScoreFunction) Missing(missing types.Float64) *_fieldValueFactorScoreFunction { + + s.v.Missing = &missing + + return s +} + +// Modifier to apply to the field value. 
+func (s *_fieldValueFactorScoreFunction) Modifier(modifier fieldvaluefactormodifier.FieldValueFactorModifier) *_fieldValueFactorScoreFunction { + + s.v.Modifier = &modifier + return s +} + +func (s *_fieldValueFactorScoreFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.FieldValueFactor = s.v + + return container +} + +func (s *_fieldValueFactorScoreFunction) FieldValueFactorScoreFunctionCaster() *types.FieldValueFactorScoreFunction { + return s.v +} diff --git a/typedapi/esdsl/fillmaskinferenceoptions.go b/typedapi/esdsl/fillmaskinferenceoptions.go new file mode 100644 index 0000000000..f31bba9a02 --- /dev/null +++ b/typedapi/esdsl/fillmaskinferenceoptions.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fillMaskInferenceOptions struct { + v *types.FillMaskInferenceOptions +} + +// Fill mask configuration for inference. 
+func NewFillMaskInferenceOptions(vocabulary types.VocabularyVariant) *_fillMaskInferenceOptions { + + tmp := &_fillMaskInferenceOptions{v: types.NewFillMaskInferenceOptions()} + + tmp.Vocabulary(vocabulary) + + return tmp + +} + +// The string/token which will be removed from incoming documents and replaced +// with the inference prediction(s). +// In a response, this field contains the mask token for the specified +// model/tokenizer. Each model and tokenizer +// has a predefined mask token which cannot be changed. Thus, it is recommended +// not to set this value in requests. +// However, if this field is present in a request, its value must match the +// predefined value for that model/tokenizer, +// otherwise the request will fail. +func (s *_fillMaskInferenceOptions) MaskToken(masktoken string) *_fillMaskInferenceOptions { + + s.v.MaskToken = &masktoken + + return s +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_fillMaskInferenceOptions) NumTopClasses(numtopclasses int) *_fillMaskInferenceOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_fillMaskInferenceOptions) ResultsField(resultsfield string) *_fillMaskInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_fillMaskInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_fillMaskInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_fillMaskInferenceOptions) Vocabulary(vocabulary types.VocabularyVariant) *_fillMaskInferenceOptions { + + s.v.Vocabulary = *vocabulary.VocabularyCaster() + + return s +} + +func (s *_fillMaskInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.FillMask = s.v + + return container +} + +func (s *_fillMaskInferenceOptions) FillMaskInferenceOptionsCaster() *types.FillMaskInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/fillmaskinferenceupdateoptions.go b/typedapi/esdsl/fillmaskinferenceupdateoptions.go new file mode 100644 index 0000000000..283810d3c2 --- /dev/null +++ b/typedapi/esdsl/fillmaskinferenceupdateoptions.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fillMaskInferenceUpdateOptions struct { + v *types.FillMaskInferenceUpdateOptions +} + +// Fill mask configuration for inference. +func NewFillMaskInferenceUpdateOptions() *_fillMaskInferenceUpdateOptions { + + return &_fillMaskInferenceUpdateOptions{v: types.NewFillMaskInferenceUpdateOptions()} + +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_fillMaskInferenceUpdateOptions) NumTopClasses(numtopclasses int) *_fillMaskInferenceUpdateOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_fillMaskInferenceUpdateOptions) ResultsField(resultsfield string) *_fillMaskInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_fillMaskInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_fillMaskInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_fillMaskInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.FillMask = s.v + + return container +} + +func (s *_fillMaskInferenceUpdateOptions) FillMaskInferenceUpdateOptionsCaster() *types.FillMaskInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/filteringadvancedsnippet.go b/typedapi/esdsl/filteringadvancedsnippet.go new file mode 100644 index 0000000000..d4a9c2f2dd --- /dev/null +++ b/typedapi/esdsl/filteringadvancedsnippet.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"encoding/json"

	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
)

// _filteringAdvancedSnippet is a fluent builder around
// types.FilteringAdvancedSnippet. The snippet payload is kept as raw,
// undecoded JSON.
type _filteringAdvancedSnippet struct {
	v *types.FilteringAdvancedSnippet
}

// NewFilteringAdvancedSnippet creates a builder with the required snippet
// value already set.
func NewFilteringAdvancedSnippet(value json.RawMessage) *_filteringAdvancedSnippet {
	tmp := &_filteringAdvancedSnippet{v: types.NewFilteringAdvancedSnippet()}

	tmp.Value(value)

	return tmp
}

// CreatedAt sets the snippet's creation timestamp.
func (s *_filteringAdvancedSnippet) CreatedAt(datetime types.DateTimeVariant) *_filteringAdvancedSnippet {
	s.v.CreatedAt = *datetime.DateTimeCaster()
	return s
}

// UpdatedAt sets the snippet's last-update timestamp.
func (s *_filteringAdvancedSnippet) UpdatedAt(datetime types.DateTimeVariant) *_filteringAdvancedSnippet {
	s.v.UpdatedAt = *datetime.DateTimeCaster()
	return s
}

// Value sets the raw JSON body of the snippet.
func (s *_filteringAdvancedSnippet) Value(value json.RawMessage) *_filteringAdvancedSnippet {
	s.v.Value = value
	return s
}

// FilteringAdvancedSnippetCaster returns the built value.
func (s *_filteringAdvancedSnippet) FilteringAdvancedSnippetCaster() *types.FilteringAdvancedSnippet {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

// _filteringConfig is a fluent builder around types.FilteringConfig. The
// constructor takes the two required rule sets (active and draft); Domain is
// optional.
type _filteringConfig struct {
	v *types.FilteringConfig
}

// NewFilteringConfig creates a builder with the required active and draft
// filtering rules already set.
func NewFilteringConfig(active types.FilteringRulesVariant, draft types.FilteringRulesVariant) *_filteringConfig {
	tmp := &_filteringConfig{v: types.NewFilteringConfig()}

	tmp.Active(active)

	tmp.Draft(draft)

	return tmp
}

// Active sets the currently active filtering rules.
func (s *_filteringConfig) Active(active types.FilteringRulesVariant) *_filteringConfig {
	s.v.Active = *active.FilteringRulesCaster()
	return s
}

// Domain sets the optional domain for this configuration.
func (s *_filteringConfig) Domain(domain string) *_filteringConfig {
	s.v.Domain = &domain
	return s
}

// Draft sets the draft (not yet active) filtering rules.
func (s *_filteringConfig) Draft(draft types.FilteringRulesVariant) *_filteringConfig {
	s.v.Draft = *draft.FilteringRulesCaster()
	return s
}

// FilteringConfigCaster returns the built value.
func (s *_filteringConfig) FilteringConfigCaster() *types.FilteringConfig {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filteringpolicy"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filteringrulerule"
)

// _filteringRule is a fluent builder around types.FilteringRule. The
// constructor takes the four required properties (order, policy, rule, value);
// the remaining setters are optional.
type _filteringRule struct {
	v *types.FilteringRule
}

// NewFilteringRule creates a builder with the required order, policy, rule
// and value already set.
func NewFilteringRule(order int, policy filteringpolicy.FilteringPolicy, rule filteringrulerule.FilteringRuleRule, value string) *_filteringRule {
	tmp := &_filteringRule{v: types.NewFilteringRule()}

	tmp.Order(order)

	tmp.Policy(policy)

	tmp.Rule(rule)

	tmp.Value(value)

	return tmp
}

// CreatedAt sets the rule's creation timestamp.
func (s *_filteringRule) CreatedAt(datetime types.DateTimeVariant) *_filteringRule {
	s.v.CreatedAt = *datetime.DateTimeCaster()
	return s
}

// Field sets the document field the rule applies to.
func (s *_filteringRule) Field(field string) *_filteringRule {
	s.v.Field = field
	return s
}

// Id sets the rule's identifier.
func (s *_filteringRule) Id(id string) *_filteringRule {
	s.v.Id = id
	return s
}

// Order sets the rule's position relative to its siblings.
func (s *_filteringRule) Order(order int) *_filteringRule {
	s.v.Order = order
	return s
}

// Policy sets the filtering policy enum value.
func (s *_filteringRule) Policy(policy filteringpolicy.FilteringPolicy) *_filteringRule {
	s.v.Policy = policy
	return s
}

// Rule sets the rule-kind enum value.
func (s *_filteringRule) Rule(rule filteringrulerule.FilteringRuleRule) *_filteringRule {
	s.v.Rule = rule
	return s
}

// UpdatedAt sets the rule's last-update timestamp.
func (s *_filteringRule) UpdatedAt(datetime types.DateTimeVariant) *_filteringRule {
	s.v.UpdatedAt = *datetime.DateTimeCaster()
	return s
}

// Value sets the value the rule matches against.
func (s *_filteringRule) Value(value string) *_filteringRule {
	s.v.Value = value
	return s
}

// FilteringRuleCaster returns the built value.
func (s *_filteringRule) FilteringRuleCaster() *types.FilteringRule {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

// _filteringRules is a fluent builder around types.FilteringRules. The
// constructor takes the required advanced snippet and validation; individual
// rules are appended via Rules.
type _filteringRules struct {
	v *types.FilteringRules
}

// NewFilteringRules creates a builder with the required advanced snippet and
// validation already set.
func NewFilteringRules(advancedsnippet types.FilteringAdvancedSnippetVariant, validation types.FilteringRulesValidationVariant) *_filteringRules {
	tmp := &_filteringRules{v: types.NewFilteringRules()}

	tmp.AdvancedSnippet(advancedsnippet)

	tmp.Validation(validation)

	return tmp
}

// AdvancedSnippet sets the advanced snippet.
func (s *_filteringRules) AdvancedSnippet(advancedsnippet types.FilteringAdvancedSnippetVariant) *_filteringRules {
	s.v.AdvancedSnippet = *advancedsnippet.FilteringAdvancedSnippetCaster()
	return s
}

// Rules appends the given rules; each variant is resolved to its concrete
// value via its caster before being stored. Values accumulate across calls.
func (s *_filteringRules) Rules(rules ...types.FilteringRuleVariant) *_filteringRules {
	for _, v := range rules {

		s.v.Rules = append(s.v.Rules, *v.FilteringRuleCaster())

	}
	return s
}

// Validation sets the validation result for the rule set.
func (s *_filteringRules) Validation(validation types.FilteringRulesValidationVariant) *_filteringRules {
	s.v.Validation = *validation.FilteringRulesValidationCaster()
	return s
}

// FilteringRulesCaster returns the built value.
func (s *_filteringRules) FilteringRulesCaster() *types.FilteringRules {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filteringvalidationstate"
)

// _filteringRulesValidation is a fluent builder around
// types.FilteringRulesValidation. The constructor takes the required state;
// validation errors are appended via Errors.
type _filteringRulesValidation struct {
	v *types.FilteringRulesValidation
}

// NewFilteringRulesValidation creates a builder with the required validation
// state already set.
func NewFilteringRulesValidation(state filteringvalidationstate.FilteringValidationState) *_filteringRulesValidation {
	tmp := &_filteringRulesValidation{v: types.NewFilteringRulesValidation()}

	tmp.State(state)

	return tmp
}

// Errors appends the given validation errors; each variant is resolved to its
// concrete value via its caster. Values accumulate across calls.
func (s *_filteringRulesValidation) Errors(errors ...types.FilteringValidationVariant) *_filteringRulesValidation {
	for _, v := range errors {

		s.v.Errors = append(s.v.Errors, *v.FilteringValidationCaster())

	}
	return s
}

// State sets the validation state enum value.
func (s *_filteringRulesValidation) State(state filteringvalidationstate.FilteringValidationState) *_filteringRulesValidation {
	s.v.State = state
	return s
}

// FilteringRulesValidationCaster returns the built value.
func (s *_filteringRulesValidation) FilteringRulesValidationCaster() *types.FilteringRulesValidation {
	return s.v
}
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _filteringValidation struct { + v *types.FilteringValidation +} + +func NewFilteringValidation() *_filteringValidation { + + return &_filteringValidation{v: types.NewFilteringValidation()} + +} + +func (s *_filteringValidation) Ids(ids ...string) *_filteringValidation { + + for _, v := range ids { + + s.v.Ids = append(s.v.Ids, v) + + } + return s +} + +func (s *_filteringValidation) Messages(messages ...string) *_filteringValidation { + + for _, v := range messages { + + s.v.Messages = append(s.v.Messages, v) + + } + return s +} + +func (s *_filteringValidation) FilteringValidationCaster() *types.FilteringValidation { + return s.v +} diff --git a/typedapi/esdsl/filterref.go b/typedapi/esdsl/filterref.go new file mode 100644 index 0000000000..fd17eb2a5d --- /dev/null +++ b/typedapi/esdsl/filterref.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype"
)

// _filterRef is a fluent builder around types.FilterRef, a reference to a
// filter by id plus how the filter is applied.
type _filterRef struct {
	v *types.FilterRef
}

// NewFilterRef returns an empty FilterRef builder.
func NewFilterRef() *_filterRef {
	return &_filterRef{v: types.NewFilterRef()}
}

// FilterId sets the identifier for the filter.
func (s *_filterRef) FilterId(id string) *_filterRef {
	s.v.FilterId = id
	return s
}

// FilterType controls how the filter is applied: if set to `include`, the
// rule applies for values in the filter; if set to `exclude`, the rule
// applies for values not in the filter.
func (s *_filterRef) FilterType(filtertype filtertype.FilterType) *_filterRef {
	s.v.FilterType = &filtertype
	return s
}

// FilterRefCaster returns the built value.
func (s *_filterRef) FilterRefCaster() *types.FilterRef {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

// _filtersAggregation is a fluent builder around types.FiltersAggregation.
type _filtersAggregation struct {
	v *types.FiltersAggregation
}

// NewFiltersAggregation creates a builder for a multi-bucket aggregation
// where each bucket contains the documents that match a query.
func NewFiltersAggregation() *_filtersAggregation {
	return &_filtersAggregation{v: types.NewFiltersAggregation()}
}

// Filters sets the collection of queries from which to build buckets.
func (s *_filtersAggregation) Filters(bucketsquery types.BucketsQueryVariant) *_filtersAggregation {
	s.v.Filters = *bucketsquery.BucketsQueryCaster()
	return s
}

// Keyed: by default, the named filters aggregation returns the buckets as an
// object. Set to `false` to return the buckets as an array of objects.
func (s *_filtersAggregation) Keyed(keyed bool) *_filtersAggregation {
	s.v.Keyed = &keyed
	return s
}

// OtherBucket: set to `true` to add a bucket to the response which will
// contain all documents that do not match any of the given filters.
func (s *_filtersAggregation) OtherBucket(otherbucket bool) *_filtersAggregation {
	s.v.OtherBucket = &otherbucket
	return s
}

// OtherBucketKey sets the key with which the other bucket is returned.
func (s *_filtersAggregation) OtherBucketKey(otherbucketkey string) *_filtersAggregation {
	s.v.OtherBucketKey = &otherbucketkey
	return s
}

// AggregationsCaster wraps the built aggregation in a fresh Aggregations
// container under its Filters slot.
func (s *_filtersAggregation) AggregationsCaster() *types.Aggregations {
	container := types.NewAggregations()

	container.Filters = s.v

	return container
}

// FiltersAggregationCaster returns the built value.
func (s *_filtersAggregation) FiltersAggregationCaster() *types.FiltersAggregation {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

// _fingerprintAnalyzer is a fluent builder around types.FingerprintAnalyzer.
type _fingerprintAnalyzer struct {
	v *types.FingerprintAnalyzer
}

// NewFingerprintAnalyzer returns an empty FingerprintAnalyzer builder.
func NewFingerprintAnalyzer() *_fingerprintAnalyzer {
	return &_fingerprintAnalyzer{v: types.NewFingerprintAnalyzer()}
}

// MaxOutputSize sets the maximum token size to emit. Tokens larger than this
// size will be discarded. Defaults to `255`.
func (s *_fingerprintAnalyzer) MaxOutputSize(maxoutputsize int) *_fingerprintAnalyzer {
	s.v.MaxOutputSize = &maxoutputsize
	return s
}

// Separator sets the character to use to concatenate the terms.
// Defaults to a space.
func (s *_fingerprintAnalyzer) Separator(separator string) *_fingerprintAnalyzer {
	s.v.Separator = &separator
	return s
}

// Stopwords sets a pre-defined stop words list like `_english_` or an array
// containing a list of stop words. Defaults to `_none_`.
// Note: this assigns (replaces) the list rather than appending.
func (s *_fingerprintAnalyzer) Stopwords(stopwords ...string) *_fingerprintAnalyzer {
	s.v.Stopwords = stopwords
	return s
}

// StopwordsPath sets the path to a file containing stop words.
func (s *_fingerprintAnalyzer) StopwordsPath(stopwordspath string) *_fingerprintAnalyzer {
	s.v.StopwordsPath = &stopwordspath
	return s
}

// Version sets the analyzer version string.
func (s *_fingerprintAnalyzer) Version(versionstring string) *_fingerprintAnalyzer {
	s.v.Version = &versionstring
	return s
}

// FingerprintAnalyzerCaster returns the built value.
func (s *_fingerprintAnalyzer) FingerprintAnalyzerCaster() *types.FingerprintAnalyzer {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fingerprintdigest"
)

// _fingerprintProcessor is a fluent builder around types.FingerprintProcessor.
type _fingerprintProcessor struct {
	v *types.FingerprintProcessor
}

// NewFingerprintProcessor creates a builder for a processor that computes a
// hash of the document’s content. You can use this hash for content
// fingerprinting.
func NewFingerprintProcessor() *_fingerprintProcessor {
	return &_fingerprintProcessor{v: types.NewFingerprintProcessor()}
}

// Description of the processor.
// Useful for describing the purpose of the processor or its configuration.
func (s *_fingerprintProcessor) Description(description string) *_fingerprintProcessor {
	s.v.Description = &description
	return s
}

// Fields sets the array of fields to include in the fingerprint. For objects,
// the processor hashes both the field key and value. For other fields, the
// processor hashes only the field value. Note: assigns (replaces) the list.
func (s *_fingerprintProcessor) Fields(fields ...string) *_fingerprintProcessor {
	s.v.Fields = fields
	return s
}

// If conditionally executes the processor.
func (s *_fingerprintProcessor) If(if_ types.ScriptVariant) *_fingerprintProcessor {
	s.v.If = if_.ScriptCaster()
	return s
}

// IgnoreFailure ignores failures for the processor.
func (s *_fingerprintProcessor) IgnoreFailure(ignorefailure bool) *_fingerprintProcessor {
	s.v.IgnoreFailure = &ignorefailure
	return s
}

// IgnoreMissing: if true, the processor ignores any missing fields. If all
// fields are missing, the processor silently exits without modifying the
// document.
func (s *_fingerprintProcessor) IgnoreMissing(ignoremissing bool) *_fingerprintProcessor {
	s.v.IgnoreMissing = &ignoremissing
	return s
}

// Method sets the hash method used to compute the fingerprint. Must be one of
// MD5, SHA-1, SHA-256, SHA-512, or MurmurHash3.
func (s *_fingerprintProcessor) Method(method fingerprintdigest.FingerprintDigest) *_fingerprintProcessor {
	s.v.Method = &method
	return s
}

// OnFailure appends processors that handle failures for this processor.
// Values accumulate across calls.
func (s *_fingerprintProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_fingerprintProcessor {
	for _, v := range onfailures {

		s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster())

	}
	return s
}

// Salt sets the salt value for the hash function.
func (s *_fingerprintProcessor) Salt(salt string) *_fingerprintProcessor {
	s.v.Salt = &salt
	return s
}

// Tag sets an identifier for the processor.
// Useful for debugging and metrics.
func (s *_fingerprintProcessor) Tag(tag string) *_fingerprintProcessor {
	s.v.Tag = &tag
	return s
}

// TargetField sets the output field for the fingerprint.
func (s *_fingerprintProcessor) TargetField(field string) *_fingerprintProcessor {
	s.v.TargetField = &field
	return s
}

// ProcessorContainerCaster wraps the built processor in a fresh
// ProcessorContainer under its Fingerprint slot.
func (s *_fingerprintProcessor) ProcessorContainerCaster() *types.ProcessorContainer {
	container := types.NewProcessorContainer()

	container.Fingerprint = s.v

	return container
}

// FingerprintProcessorCaster returns the built value.
func (s *_fingerprintProcessor) FingerprintProcessorCaster() *types.FingerprintProcessor {
	return s.v
}
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

// _fingerprintTokenFilter is a fluent builder around
// types.FingerprintTokenFilter.
type _fingerprintTokenFilter struct {
	v *types.FingerprintTokenFilter
}

// NewFingerprintTokenFilter returns an empty FingerprintTokenFilter builder.
func NewFingerprintTokenFilter() *_fingerprintTokenFilter {
	return &_fingerprintTokenFilter{v: types.NewFingerprintTokenFilter()}
}

// MaxOutputSize sets the maximum output token size.
func (s *_fingerprintTokenFilter) MaxOutputSize(maxoutputsize int) *_fingerprintTokenFilter {
	s.v.MaxOutputSize = &maxoutputsize
	return s
}

// Separator sets the string used to join concatenated terms.
func (s *_fingerprintTokenFilter) Separator(separator string) *_fingerprintTokenFilter {
	s.v.Separator = &separator
	return s
}

// Version sets the token filter version string.
func (s *_fingerprintTokenFilter) Version(versionstring string) *_fingerprintTokenFilter {
	s.v.Version = &versionstring
	return s
}

// FingerprintTokenFilterCaster returns the built value.
func (s *_fingerprintTokenFilter) FingerprintTokenFilterCaster() *types.FingerprintTokenFilter {
	return s.v
}
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _finnishAnalyzer struct { + v *types.FinnishAnalyzer +} + +func NewFinnishAnalyzer() *_finnishAnalyzer { + + return &_finnishAnalyzer{v: types.NewFinnishAnalyzer()} + +} + +func (s *_finnishAnalyzer) StemExclusion(stemexclusions ...string) *_finnishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_finnishAnalyzer) Stopwords(stopwords ...string) *_finnishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_finnishAnalyzer) StopwordsPath(stopwordspath string) *_finnishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_finnishAnalyzer) FinnishAnalyzerCaster() *types.FinnishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/flattenedproperty.go b/typedapi/esdsl/flattenedproperty.go new file mode 100644 index 0000000000..0558030894 --- /dev/null +++ b/typedapi/esdsl/flattenedproperty.go @@ -0,0 +1,195 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _flattenedProperty struct { + v *types.FlattenedProperty +} + +func NewFlattenedProperty() *_flattenedProperty { + + return &_flattenedProperty{v: types.NewFlattenedProperty()} + +} + +func (s *_flattenedProperty) Boost(boost types.Float64) *_flattenedProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_flattenedProperty) DepthLimit(depthlimit int) *_flattenedProperty { + + s.v.DepthLimit = &depthlimit + + return s +} + +func (s *_flattenedProperty) DocValues(docvalues bool) *_flattenedProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_flattenedProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_flattenedProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_flattenedProperty) EagerGlobalOrdinals(eagerglobalordinals bool) *_flattenedProperty { + + s.v.EagerGlobalOrdinals = &eagerglobalordinals + + return s +} + +func (s *_flattenedProperty) Fields(fields map[string]types.Property) *_flattenedProperty { + + s.v.Fields = fields + return s +} + +func (s *_flattenedProperty) AddField(key string, value types.PropertyVariant) *_flattenedProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_flattenedProperty) 
IgnoreAbove(ignoreabove int) *_flattenedProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_flattenedProperty) Index(index bool) *_flattenedProperty { + + s.v.Index = &index + + return s +} + +func (s *_flattenedProperty) IndexOptions(indexoptions indexoptions.IndexOptions) *_flattenedProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +// Metadata about the field. +func (s *_flattenedProperty) Meta(meta map[string]string) *_flattenedProperty { + + s.v.Meta = meta + return s +} + +func (s *_flattenedProperty) AddMeta(key string, value string) *_flattenedProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_flattenedProperty) NullValue(nullvalue string) *_flattenedProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_flattenedProperty) Properties(properties map[string]types.Property) *_flattenedProperty { + + s.v.Properties = properties + return s +} + +func (s *_flattenedProperty) AddProperty(key string, value types.PropertyVariant) *_flattenedProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_flattenedProperty) Similarity(similarity string) *_flattenedProperty { + + s.v.Similarity = &similarity + + return s +} + +func (s *_flattenedProperty) SplitQueriesOnWhitespace(splitqueriesonwhitespace bool) *_flattenedProperty { + + s.v.SplitQueriesOnWhitespace = &splitqueriesonwhitespace + + return s +} + +func (s *_flattenedProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_flattenedProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_flattenedProperty) DynamicTemplateCaster() 
*types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_flattenedProperty) FlattenedPropertyCaster() *types.FlattenedProperty { + return s.v +} diff --git a/typedapi/esdsl/floatnumberproperty.go b/typedapi/esdsl/floatnumberproperty.go new file mode 100644 index 0000000000..1169468513 --- /dev/null +++ b/typedapi/esdsl/floatnumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _floatNumberProperty struct { + v *types.FloatNumberProperty +} + +func NewFloatNumberProperty() *_floatNumberProperty { + + return &_floatNumberProperty{v: types.NewFloatNumberProperty()} + +} + +func (s *_floatNumberProperty) Boost(boost types.Float64) *_floatNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_floatNumberProperty) Coerce(coerce bool) *_floatNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_floatNumberProperty) CopyTo(fields ...string) *_floatNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_floatNumberProperty) DocValues(docvalues bool) *_floatNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_floatNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_floatNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_floatNumberProperty) Fields(fields map[string]types.Property) *_floatNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_floatNumberProperty) AddField(key string, value types.PropertyVariant) *_floatNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_floatNumberProperty) IgnoreAbove(ignoreabove int) *_floatNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s 
*_floatNumberProperty) IgnoreMalformed(ignoremalformed bool) *_floatNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_floatNumberProperty) Index(index bool) *_floatNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_floatNumberProperty) Meta(meta map[string]string) *_floatNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_floatNumberProperty) AddMeta(key string, value string) *_floatNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_floatNumberProperty) NullValue(nullvalue float32) *_floatNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_floatNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_floatNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_floatNumberProperty) Properties(properties map[string]types.Property) *_floatNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_floatNumberProperty) AddProperty(key string, value types.PropertyVariant) *_floatNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_floatNumberProperty) Script(script types.ScriptVariant) *_floatNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_floatNumberProperty) Store(store bool) *_floatNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_floatNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_floatNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. 
Marks the field as a time series dimension. +// Defaults to false. +func (s *_floatNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_floatNumberProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_floatNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_floatNumberProperty { + + s.v.TimeSeriesMetric = ×eriesmetric + return s +} + +func (s *_floatNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_floatNumberProperty) FloatNumberPropertyCaster() *types.FloatNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/floatrangeproperty.go b/typedapi/esdsl/floatrangeproperty.go new file mode 100644 index 0000000000..b52cc2fac8 --- /dev/null +++ b/typedapi/esdsl/floatrangeproperty.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _floatRangeProperty struct { + v *types.FloatRangeProperty +} + +func NewFloatRangeProperty() *_floatRangeProperty { + + return &_floatRangeProperty{v: types.NewFloatRangeProperty()} + +} + +func (s *_floatRangeProperty) Boost(boost types.Float64) *_floatRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_floatRangeProperty) Coerce(coerce bool) *_floatRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_floatRangeProperty) CopyTo(fields ...string) *_floatRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_floatRangeProperty) DocValues(docvalues bool) *_floatRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_floatRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_floatRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_floatRangeProperty) Fields(fields map[string]types.Property) *_floatRangeProperty { + + s.v.Fields = fields + return s +} + +func (s *_floatRangeProperty) AddField(key string, value types.PropertyVariant) *_floatRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_floatRangeProperty) IgnoreAbove(ignoreabove int) *_floatRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_floatRangeProperty) Index(index bool) *_floatRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_floatRangeProperty) Meta(meta map[string]string) *_floatRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_floatRangeProperty) AddMeta(key string, value string) *_floatRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_floatRangeProperty) Properties(properties map[string]types.Property) *_floatRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_floatRangeProperty) AddProperty(key string, value types.PropertyVariant) *_floatRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_floatRangeProperty) Store(store bool) *_floatRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_floatRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_floatRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_floatRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_floatRangeProperty) FloatRangePropertyCaster() *types.FloatRangeProperty { + return s.v +} diff --git a/typedapi/esdsl/forcemergeaction.go b/typedapi/esdsl/forcemergeaction.go new file mode 100644 index 0000000000..cadc3e803f --- /dev/null +++ b/typedapi/esdsl/forcemergeaction.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _forceMergeAction struct { + v *types.ForceMergeAction +} + +func NewForceMergeAction(maxnumsegments int) *_forceMergeAction { + + tmp := &_forceMergeAction{v: types.NewForceMergeAction()} + + tmp.MaxNumSegments(maxnumsegments) + + return tmp + +} + +func (s *_forceMergeAction) IndexCodec(indexcodec string) *_forceMergeAction { + + s.v.IndexCodec = &indexcodec + + return s +} + +func (s *_forceMergeAction) MaxNumSegments(maxnumsegments int) *_forceMergeAction { + + s.v.MaxNumSegments = maxnumsegments + + return s +} + +func (s *_forceMergeAction) ForceMergeActionCaster() *types.ForceMergeAction { + return s.v +} diff --git a/typedapi/esdsl/foreachprocessor.go b/typedapi/esdsl/foreachprocessor.go new file mode 100644 index 0000000000..cafea8b704 --- /dev/null +++ b/typedapi/esdsl/foreachprocessor.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _foreachProcessor struct { + v *types.ForeachProcessor +} + +// Runs an ingest processor on each element of an array or object. +func NewForeachProcessor(processor types.ProcessorContainerVariant) *_foreachProcessor { + + tmp := &_foreachProcessor{v: types.NewForeachProcessor()} + + tmp.Processor(processor) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_foreachProcessor) Description(description string) *_foreachProcessor { + + s.v.Description = &description + + return s +} + +// Field containing array or object values. +func (s *_foreachProcessor) Field(field string) *_foreachProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_foreachProcessor) If(if_ types.ScriptVariant) *_foreachProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_foreachProcessor) IgnoreFailure(ignorefailure bool) *_foreachProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true`, the processor silently exits without changing the document if the +// `field` is `null` or missing. +func (s *_foreachProcessor) IgnoreMissing(ignoremissing bool) *_foreachProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_foreachProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_foreachProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Ingest processor to run on each element. +func (s *_foreachProcessor) Processor(processor types.ProcessorContainerVariant) *_foreachProcessor { + + s.v.Processor = *processor.ProcessorContainerCaster() + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_foreachProcessor) Tag(tag string) *_foreachProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_foreachProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Foreach = s.v + + return container +} + +func (s *_foreachProcessor) ForeachProcessorCaster() *types.ForeachProcessor { + return s.v +} diff --git a/typedapi/esdsl/frenchanalyzer.go b/typedapi/esdsl/frenchanalyzer.go new file mode 100644 index 0000000000..3d24c8c597 --- /dev/null +++ b/typedapi/esdsl/frenchanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _frenchAnalyzer struct { + v *types.FrenchAnalyzer +} + +func NewFrenchAnalyzer() *_frenchAnalyzer { + + return &_frenchAnalyzer{v: types.NewFrenchAnalyzer()} + +} + +func (s *_frenchAnalyzer) StemExclusion(stemexclusions ...string) *_frenchAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_frenchAnalyzer) Stopwords(stopwords ...string) *_frenchAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_frenchAnalyzer) StopwordsPath(stopwordspath string) *_frenchAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_frenchAnalyzer) FrenchAnalyzerCaster() *types.FrenchAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/frequencyencodingpreprocessor.go b/typedapi/esdsl/frequencyencodingpreprocessor.go new file mode 100644 index 0000000000..6e7c93e57e --- /dev/null +++ b/typedapi/esdsl/frequencyencodingpreprocessor.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _frequencyEncodingPreprocessor struct { + v *types.FrequencyEncodingPreprocessor +} + +func NewFrequencyEncodingPreprocessor(featurename string, field string) *_frequencyEncodingPreprocessor { + + tmp := &_frequencyEncodingPreprocessor{v: types.NewFrequencyEncodingPreprocessor()} + + tmp.FeatureName(featurename) + + tmp.Field(field) + + return tmp + +} + +func (s *_frequencyEncodingPreprocessor) FeatureName(featurename string) *_frequencyEncodingPreprocessor { + + s.v.FeatureName = featurename + + return s +} + +func (s *_frequencyEncodingPreprocessor) Field(field string) *_frequencyEncodingPreprocessor { + + s.v.Field = field + + return s +} + +func (s *_frequencyEncodingPreprocessor) FrequencyMap(frequencymap map[string]types.Float64) *_frequencyEncodingPreprocessor { + + s.v.FrequencyMap = frequencymap + return s +} + +func (s *_frequencyEncodingPreprocessor) AddFrequencyMap(key string, value types.Float64) *_frequencyEncodingPreprocessor { + + var tmp map[string]types.Float64 + if s.v.FrequencyMap == nil { + s.v.FrequencyMap = make(map[string]types.Float64) + } else { + tmp = s.v.FrequencyMap + } + + tmp[key] = value + + 
s.v.FrequencyMap = tmp + return s +} + +func (s *_frequencyEncodingPreprocessor) PreprocessorCaster() *types.Preprocessor { + container := types.NewPreprocessor() + + container.FrequencyEncoding = s.v + + return container +} + +func (s *_frequencyEncodingPreprocessor) FrequencyEncodingPreprocessorCaster() *types.FrequencyEncodingPreprocessor { + return s.v +} diff --git a/typedapi/esdsl/frequentitemsetsaggregation.go b/typedapi/esdsl/frequentitemsetsaggregation.go new file mode 100644 index 0000000000..6d3fb2ac00 --- /dev/null +++ b/typedapi/esdsl/frequentitemsetsaggregation.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _frequentItemSetsAggregation struct { + v *types.FrequentItemSetsAggregation +} + +// A bucket aggregation which finds frequent item sets, a form of association +// rules mining that identifies items that often occur together. 
+func NewFrequentItemSetsAggregation() *_frequentItemSetsAggregation { + + return &_frequentItemSetsAggregation{v: types.NewFrequentItemSetsAggregation()} + +} + +// Fields to analyze. +func (s *_frequentItemSetsAggregation) Fields(fields ...types.FrequentItemSetsFieldVariant) *_frequentItemSetsAggregation { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, *v.FrequentItemSetsFieldCaster()) + + } + return s +} + +// Query that filters documents from analysis. +func (s *_frequentItemSetsAggregation) Filter(filter types.QueryVariant) *_frequentItemSetsAggregation { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// The minimum size of one item set. +func (s *_frequentItemSetsAggregation) MinimumSetSize(minimumsetsize int) *_frequentItemSetsAggregation { + + s.v.MinimumSetSize = &minimumsetsize + + return s +} + +// The minimum support of one item set. +func (s *_frequentItemSetsAggregation) MinimumSupport(minimumsupport types.Float64) *_frequentItemSetsAggregation { + + s.v.MinimumSupport = &minimumsupport + + return s +} + +// The number of top item sets to return. +func (s *_frequentItemSetsAggregation) Size(size int) *_frequentItemSetsAggregation { + + s.v.Size = &size + + return s +} + +func (s *_frequentItemSetsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.FrequentItemSets = s.v + + return container +} + +func (s *_frequentItemSetsAggregation) FrequentItemSetsAggregationCaster() *types.FrequentItemSetsAggregation { + return s.v +} diff --git a/typedapi/esdsl/frequentitemsetsfield.go b/typedapi/esdsl/frequentitemsetsfield.go new file mode 100644 index 0000000000..d5f9062af6 --- /dev/null +++ b/typedapi/esdsl/frequentitemsetsfield.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _frequentItemSetsField struct { + v *types.FrequentItemSetsField +} + +func NewFrequentItemSetsField() *_frequentItemSetsField { + + return &_frequentItemSetsField{v: types.NewFrequentItemSetsField()} + +} + +// Values to exclude. +// Can be regular expression strings or arrays of strings of exact terms. +func (s *_frequentItemSetsField) Exclude(termsexcludes ...string) *_frequentItemSetsField { + + s.v.Exclude = termsexcludes + + return s +} + +func (s *_frequentItemSetsField) Field(field string) *_frequentItemSetsField { + + s.v.Field = field + + return s +} + +// Values to include. +// Can be regular expression strings or arrays of strings of exact terms. 
+func (s *_frequentItemSetsField) Include(termsinclude types.TermsIncludeVariant) *_frequentItemSetsField { + + s.v.Include = *termsinclude.TermsIncludeCaster() + + return s +} + +func (s *_frequentItemSetsField) FrequentItemSetsFieldCaster() *types.FrequentItemSetsField { + return s.v +} diff --git a/typedapi/esdsl/functionscore.go b/typedapi/esdsl/functionscore.go new file mode 100644 index 0000000000..6290922dfa --- /dev/null +++ b/typedapi/esdsl/functionscore.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _functionScore struct { + v *types.FunctionScore +} + +func NewFunctionScore() *_functionScore { + return &_functionScore{v: types.NewFunctionScore()} +} + +// AdditionalFunctionScoreProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_functionScore) AdditionalFunctionScoreProperty(key string, value json.RawMessage) *_functionScore { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalFunctionScoreProperty = tmp + return s +} + +// Function that scores a document with a exponential decay, depending on the +// distance of a numeric field value of the document from an origin. +func (s *_functionScore) Exp(decayfunction types.DecayFunctionVariant) *_functionScore { + + s.v.Exp = *decayfunction.DecayFunctionCaster() + + return s +} + +// Function allows you to use a field from a document to influence the score. +// It’s similar to using the script_score function, however, it avoids the +// overhead of scripting. +func (s *_functionScore) FieldValueFactor(fieldvaluefactor types.FieldValueFactorScoreFunctionVariant) *_functionScore { + + s.v.FieldValueFactor = fieldvaluefactor.FieldValueFactorScoreFunctionCaster() + + return s +} + +func (s *_functionScore) Filter(filter types.QueryVariant) *_functionScore { + + s.v.Filter = filter.QueryCaster() + + return s +} + +// Function that scores a document with a normal decay, depending on the +// distance of a numeric field value of the document from an origin. +func (s *_functionScore) Gauss(decayfunction types.DecayFunctionVariant) *_functionScore { + + s.v.Gauss = *decayfunction.DecayFunctionCaster() + + return s +} + +// Function that scores a document with a linear decay, depending on the +// distance of a numeric field value of the document from an origin. +func (s *_functionScore) Linear(decayfunction types.DecayFunctionVariant) *_functionScore { + + s.v.Linear = *decayfunction.DecayFunctionCaster() + + return s +} + +// Generates scores that are uniformly distributed from 0 up to but not +// including 1. +// In case you want scores to be reproducible, it is possible to provide a +// `seed` and `field`. 
+func (s *_functionScore) RandomScore(randomscore types.RandomScoreFunctionVariant) *_functionScore { + + s.v.RandomScore = randomscore.RandomScoreFunctionCaster() + + return s +} + +// Enables you to wrap another query and customize the scoring of it optionally +// with a computation derived from other numeric field values in the doc using a +// script expression. +func (s *_functionScore) ScriptScore(scriptscore types.ScriptScoreFunctionVariant) *_functionScore { + + s.v.ScriptScore = scriptscore.ScriptScoreFunctionCaster() + + return s +} + +func (s *_functionScore) Weight(weight types.Float64) *_functionScore { + + s.v.Weight = &weight + + return s +} + +func (s *_functionScore) FunctionScoreCaster() *types.FunctionScore { + return s.v +} diff --git a/typedapi/esdsl/functionscorequery.go b/typedapi/esdsl/functionscorequery.go new file mode 100644 index 0000000000..a4398d26b3 --- /dev/null +++ b/typedapi/esdsl/functionscorequery.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode" +) + +type _functionScoreQuery struct { + v *types.FunctionScoreQuery +} + +// The `function_score` enables you to modify the score of documents that are +// retrieved by a query. +func NewFunctionScoreQuery() *_functionScoreQuery { + + return &_functionScoreQuery{v: types.NewFunctionScoreQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_functionScoreQuery) Boost(boost float32) *_functionScoreQuery { + + s.v.Boost = &boost + + return s +} + +// Defines how he newly computed score is combined with the score of the query +func (s *_functionScoreQuery) BoostMode(boostmode functionboostmode.FunctionBoostMode) *_functionScoreQuery { + + s.v.BoostMode = &boostmode + return s +} + +// One or more functions that compute a new score for each document returned by +// the query. +func (s *_functionScoreQuery) Functions(functions ...types.FunctionScoreVariant) *_functionScoreQuery { + + for _, v := range functions { + + s.v.Functions = append(s.v.Functions, *v.FunctionScoreCaster()) + + } + return s +} + +// Restricts the new score to not exceed the provided limit. +func (s *_functionScoreQuery) MaxBoost(maxboost types.Float64) *_functionScoreQuery { + + s.v.MaxBoost = &maxboost + + return s +} + +// Excludes documents that do not meet the provided score threshold. 
+func (s *_functionScoreQuery) MinScore(minscore types.Float64) *_functionScoreQuery { + + s.v.MinScore = &minscore + + return s +} + +// A query that determines the documents for which a new score is computed. +func (s *_functionScoreQuery) Query(query types.QueryVariant) *_functionScoreQuery { + + s.v.Query = query.QueryCaster() + + return s +} + +func (s *_functionScoreQuery) QueryName_(queryname_ string) *_functionScoreQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Specifies how the computed scores are combined +func (s *_functionScoreQuery) ScoreMode(scoremode functionscoremode.FunctionScoreMode) *_functionScoreQuery { + + s.v.ScoreMode = &scoremode + return s +} + +func (s *_functionScoreQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.FunctionScore = s.v + + return container +} + +func (s *_functionScoreQuery) FunctionScoreQueryCaster() *types.FunctionScoreQuery { + return s.v +} diff --git a/typedapi/esdsl/fuzziness.go b/typedapi/esdsl/fuzziness.go new file mode 100644 index 0000000000..2ddb9bb149 --- /dev/null +++ b/typedapi/esdsl/fuzziness.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _fuzziness struct { + v types.Fuzziness +} + +func NewFuzziness() *_fuzziness { + return &_fuzziness{v: nil} +} + +func (u *_fuzziness) String(string string) *_fuzziness { + + u.v = &string + + return u +} + +func (u *_fuzziness) Int(int int) *_fuzziness { + + u.v = &int + + return u +} + +func (u *_fuzziness) FuzzinessCaster() *types.Fuzziness { + return &u.v +} diff --git a/typedapi/esdsl/fuzzyquery.go b/typedapi/esdsl/fuzzyquery.go new file mode 100644 index 0000000000..806c58dace --- /dev/null +++ b/typedapi/esdsl/fuzzyquery.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _fuzzyQuery struct { + k string + v *types.FuzzyQuery +} + +// Returns documents that contain terms similar to the search term, as measured +// by a Levenshtein edit distance. +func NewFuzzyQuery(field string, value string) *_fuzzyQuery { + tmp := &_fuzzyQuery{ + k: field, + v: types.NewFuzzyQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_fuzzyQuery) Boost(boost float32) *_fuzzyQuery { + + s.v.Boost = &boost + + return s +} + +// Maximum edit distance allowed for matching. +func (s *_fuzzyQuery) Fuzziness(fuzziness types.FuzzinessVariant) *_fuzzyQuery { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Maximum number of variations created. +func (s *_fuzzyQuery) MaxExpansions(maxexpansions int) *_fuzzyQuery { + + s.v.MaxExpansions = &maxexpansions + + return s +} + +// Number of beginning characters left unchanged when creating expansions. +func (s *_fuzzyQuery) PrefixLength(prefixlength int) *_fuzzyQuery { + + s.v.PrefixLength = &prefixlength + + return s +} + +func (s *_fuzzyQuery) QueryName_(queryname_ string) *_fuzzyQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Method used to rewrite the query. +func (s *_fuzzyQuery) Rewrite(multitermqueryrewrite string) *_fuzzyQuery { + + s.v.Rewrite = &multitermqueryrewrite + + return s +} + +// Indicates whether edits include transpositions of two adjacent characters +// (for example `ab` to `ba`).
+func (s *_fuzzyQuery) Transpositions(transpositions bool) *_fuzzyQuery { + + s.v.Transpositions = &transpositions + + return s +} + +// Term you wish to find in the provided field. +func (s *_fuzzyQuery) Value(value string) *_fuzzyQuery { + + s.v.Value = value + + return s +} + +func (s *_fuzzyQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Fuzzy = map[string]types.FuzzyQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleFuzzyQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleFuzzyQuery() *_fuzzyQuery { + return &_fuzzyQuery{ + k: "", + v: types.NewFuzzyQuery(), + } +} + +func (s *_fuzzyQuery) FuzzyQueryCaster() *types.FuzzyQuery { + return s.v.FuzzyQueryCaster() +} diff --git a/typedapi/esdsl/galiciananalyzer.go b/typedapi/esdsl/galiciananalyzer.go new file mode 100644 index 0000000000..5fc2828276 --- /dev/null +++ b/typedapi/esdsl/galiciananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _galicianAnalyzer struct { + v *types.GalicianAnalyzer +} + +func NewGalicianAnalyzer() *_galicianAnalyzer { + + return &_galicianAnalyzer{v: types.NewGalicianAnalyzer()} + +} + +func (s *_galicianAnalyzer) StemExclusion(stemexclusions ...string) *_galicianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_galicianAnalyzer) Stopwords(stopwords ...string) *_galicianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_galicianAnalyzer) StopwordsPath(stopwordspath string) *_galicianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_galicianAnalyzer) GalicianAnalyzerCaster() *types.GalicianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/gcsrepository.go b/typedapi/esdsl/gcsrepository.go new file mode 100644 index 0000000000..8f1879de8e --- /dev/null +++ b/typedapi/esdsl/gcsrepository.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _gcsRepository struct { + v *types.GcsRepository +} + +func NewGcsRepository(settings types.GcsRepositorySettingsVariant) *_gcsRepository { + + tmp := &_gcsRepository{v: types.NewGcsRepository()} + + tmp.Settings(settings) + + return tmp + +} + +// The repository settings. +func (s *_gcsRepository) Settings(settings types.GcsRepositorySettingsVariant) *_gcsRepository { + + s.v.Settings = *settings.GcsRepositorySettingsCaster() + + return s +} + +func (s *_gcsRepository) Uuid(uuid string) *_gcsRepository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_gcsRepository) GcsRepositoryCaster() *types.GcsRepository { + return s.v +} diff --git a/typedapi/esdsl/gcsrepositorysettings.go b/typedapi/esdsl/gcsrepositorysettings.go new file mode 100644 index 0000000000..db9d46bc56 --- /dev/null +++ b/typedapi/esdsl/gcsrepositorysettings.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _gcsRepositorySettings struct { + v *types.GcsRepositorySettings +} + +func NewGcsRepositorySettings(bucket string) *_gcsRepositorySettings { + + tmp := &_gcsRepositorySettings{v: types.NewGcsRepositorySettings()} + + tmp.Bucket(bucket) + + return tmp + +} + +// The name used by the client when it uses the Google Cloud Storage service. +func (s *_gcsRepositorySettings) ApplicationName(applicationname string) *_gcsRepositorySettings { + + s.v.ApplicationName = &applicationname + + return s +} + +// The path to the repository data within the bucket. +// It defaults to the root of the bucket. +// +// NOTE: Don't set `base_path` when configuring a snapshot repository for +// Elastic Cloud Enterprise. +// Elastic Cloud Enterprise automatically generates the `base_path` for each +// deployment so that multiple deployments can share the same bucket. +func (s *_gcsRepositorySettings) BasePath(basepath string) *_gcsRepositorySettings { + + s.v.BasePath = &basepath + + return s +} + +// The name of the bucket to be used for snapshots. +func (s *_gcsRepositorySettings) Bucket(bucket string) *_gcsRepositorySettings { + + s.v.Bucket = bucket + + return s +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. 
+// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. +func (s *_gcsRepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_gcsRepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// The name of the client to use to connect to Google Cloud Storage. +func (s *_gcsRepositorySettings) Client(client string) *_gcsRepositorySettings { + + s.v.Client = &client + + return s +} + +// When set to `true`, metadata files are stored in compressed format. +// This setting doesn't affect index files that are already compressed by +// default. +func (s *_gcsRepositorySettings) Compress(compress bool) *_gcsRepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_gcsRepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_gcsRepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_gcsRepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_gcsRepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// If `true`, the repository is read-only. +// The cluster can retrieve and restore snapshots from the repository but not +// write to the repository or create snapshots in it. +// +// Only a cluster with write access can create snapshots in the repository. +// All other clusters connected to the repository should have the `readonly` +// parameter set to `true`. 
+// +// If `false`, the cluster can write to the repository and create snapshots in +// it. +// +// IMPORTANT: If you register the same snapshot repository with multiple +// clusters, only one cluster should have write access to the repository. +// Having multiple clusters write to the repository at the same time risks +// corrupting the contents of the repository. +func (s *_gcsRepositorySettings) Readonly(readonly bool) *_gcsRepositorySettings { + + s.v.Readonly = &readonly + + return s +} + +func (s *_gcsRepositorySettings) GcsRepositorySettingsCaster() *types.GcsRepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/geoboundingboxquery.go b/typedapi/esdsl/geoboundingboxquery.go new file mode 100644 index 0000000000..8a99e34aa4 --- /dev/null +++ b/typedapi/esdsl/geoboundingboxquery.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" +) + +type _geoBoundingBoxQuery struct { + v *types.GeoBoundingBoxQuery +} + +// Matches geo_point and geo_shape values that intersect a bounding box. +func NewGeoBoundingBoxQuery() *_geoBoundingBoxQuery { + + return &_geoBoundingBoxQuery{v: types.NewGeoBoundingBoxQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_geoBoundingBoxQuery) Boost(boost float32) *_geoBoundingBoxQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_geoBoundingBoxQuery) GeoBoundingBoxQuery(geoboundingboxquery map[string]types.GeoBounds) *_geoBoundingBoxQuery { + + s.v.GeoBoundingBoxQuery = geoboundingboxquery + return s +} + +func (s *_geoBoundingBoxQuery) AddGeoBoundingBoxQuery(key string, value types.GeoBoundsVariant) *_geoBoundingBoxQuery { + + var tmp map[string]types.GeoBounds + if s.v.GeoBoundingBoxQuery == nil { + s.v.GeoBoundingBoxQuery = make(map[string]types.GeoBounds) + } else { + tmp = s.v.GeoBoundingBoxQuery + } + + tmp[key] = *value.GeoBoundsCaster() + + s.v.GeoBoundingBoxQuery = tmp + return s +} + +// Set to `true` to ignore an unmapped field and not match any documents for +// this query. +// Set to `false` to throw an exception if the field is not mapped. 
+func (s *_geoBoundingBoxQuery) IgnoreUnmapped(ignoreunmapped bool) *_geoBoundingBoxQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_geoBoundingBoxQuery) QueryName_(queryname_ string) *_geoBoundingBoxQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_geoBoundingBoxQuery) Type(type_ geoexecution.GeoExecution) *_geoBoundingBoxQuery { + + s.v.Type = &type_ + return s +} + +// Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or +// longitude. +// Set to `COERCE` to also try to infer correct latitude or longitude. +func (s *_geoBoundingBoxQuery) ValidationMethod(validationmethod geovalidationmethod.GeoValidationMethod) *_geoBoundingBoxQuery { + + s.v.ValidationMethod = &validationmethod + return s +} + +func (s *_geoBoundingBoxQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.GeoBoundingBox = s.v + + return container +} + +func (s *_geoBoundingBoxQuery) GeoBoundingBoxQueryCaster() *types.GeoBoundingBoxQuery { + return s.v +} diff --git a/typedapi/esdsl/geobounds.go b/typedapi/esdsl/geobounds.go new file mode 100644 index 0000000000..321ac4c4bd --- /dev/null +++ b/typedapi/esdsl/geobounds.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _geoBounds struct { + v types.GeoBounds +} + +func NewGeoBounds() *_geoBounds { + return &_geoBounds{v: nil} +} + +func (u *_geoBounds) CoordsGeoBounds(coordsgeobounds types.CoordsGeoBoundsVariant) *_geoBounds { + + u.v = &coordsgeobounds + + return u +} + +// Interface implementation for CoordsGeoBounds in GeoBounds union +func (u *_coordsGeoBounds) GeoBoundsCaster() *types.GeoBounds { + t := types.GeoBounds(u.v) + return &t +} + +func (u *_geoBounds) TopLeftBottomRightGeoBounds(topleftbottomrightgeobounds types.TopLeftBottomRightGeoBoundsVariant) *_geoBounds { + + u.v = &topleftbottomrightgeobounds + + return u +} + +// Interface implementation for TopLeftBottomRightGeoBounds in GeoBounds union +func (u *_topLeftBottomRightGeoBounds) GeoBoundsCaster() *types.GeoBounds { + t := types.GeoBounds(u.v) + return &t +} + +func (u *_geoBounds) TopRightBottomLeftGeoBounds(toprightbottomleftgeobounds types.TopRightBottomLeftGeoBoundsVariant) *_geoBounds { + + u.v = &toprightbottomleftgeobounds + + return u +} + +// Interface implementation for TopRightBottomLeftGeoBounds in GeoBounds union +func (u *_topRightBottomLeftGeoBounds) GeoBoundsCaster() *types.GeoBounds { + t := types.GeoBounds(u.v) + return &t +} + +func (u *_geoBounds) WktGeoBounds(wktgeobounds types.WktGeoBoundsVariant) *_geoBounds { + + u.v = &wktgeobounds + + return u +} + +// Interface implementation for WktGeoBounds in GeoBounds union +func (u *_wktGeoBounds) GeoBoundsCaster() *types.GeoBounds { + t := types.GeoBounds(u.v) + return &t +} + +func (u *_geoBounds) GeoBoundsCaster()
*types.GeoBounds { + return &u.v +} diff --git a/typedapi/esdsl/geoboundsaggregation.go b/typedapi/esdsl/geoboundsaggregation.go new file mode 100644 index 0000000000..abd40c3b05 --- /dev/null +++ b/typedapi/esdsl/geoboundsaggregation.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoBoundsAggregation struct { + v *types.GeoBoundsAggregation +} + +// A metric aggregation that computes the geographic bounding box containing all +// values for a Geopoint or Geoshape field. +func NewGeoBoundsAggregation() *_geoBoundsAggregation { + + return &_geoBoundsAggregation{v: types.NewGeoBoundsAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_geoBoundsAggregation) Field(field string) *_geoBoundsAggregation { + + s.v.Field = &field + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_geoBoundsAggregation) Missing(missing types.MissingVariant) *_geoBoundsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_geoBoundsAggregation) Script(script types.ScriptVariant) *_geoBoundsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Specifies whether the bounding box should be allowed to overlap the +// international date line. +func (s *_geoBoundsAggregation) WrapLongitude(wraplongitude bool) *_geoBoundsAggregation { + + s.v.WrapLongitude = &wraplongitude + + return s +} + +func (s *_geoBoundsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeoBounds = s.v + + return container +} + +func (s *_geoBoundsAggregation) GeoBoundsAggregationCaster() *types.GeoBoundsAggregation { + return s.v +} diff --git a/typedapi/esdsl/geocentroidaggregation.go b/typedapi/esdsl/geocentroidaggregation.go new file mode 100644 index 0000000000..4925d10fbc --- /dev/null +++ b/typedapi/esdsl/geocentroidaggregation.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoCentroidAggregation struct { + v *types.GeoCentroidAggregation +} + +// A metric aggregation that computes the weighted centroid from all coordinate +// values for geo fields. +func NewGeoCentroidAggregation() *_geoCentroidAggregation { + + return &_geoCentroidAggregation{v: types.NewGeoCentroidAggregation()} + +} + +func (s *_geoCentroidAggregation) Count(count int64) *_geoCentroidAggregation { + + s.v.Count = &count + + return s +} + +// The field on which to run the aggregation. +func (s *_geoCentroidAggregation) Field(field string) *_geoCentroidAggregation { + + s.v.Field = &field + + return s +} + +func (s *_geoCentroidAggregation) Location(geolocation types.GeoLocationVariant) *_geoCentroidAggregation { + + s.v.Location = *geolocation.GeoLocationCaster() + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_geoCentroidAggregation) Missing(missing types.MissingVariant) *_geoCentroidAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_geoCentroidAggregation) Script(script types.ScriptVariant) *_geoCentroidAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_geoCentroidAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeoCentroid = s.v + + return container +} + +func (s *_geoCentroidAggregation) GeoCentroidAggregationCaster() *types.GeoCentroidAggregation { + return s.v +} diff --git a/typedapi/esdsl/geodecayfunction.go b/typedapi/esdsl/geodecayfunction.go new file mode 100644 index 0000000000..559f97aca5 --- /dev/null +++ b/typedapi/esdsl/geodecayfunction.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" +) + +type _geoDecayFunction struct { + v *types.GeoDecayFunction +} + +// Function that scores a document with a linear decay, depending on the +// distance of a numeric field value of the document from an origin. 
+func NewGeoDecayFunction() *_geoDecayFunction { + + return &_geoDecayFunction{v: types.NewGeoDecayFunction()} + +} + +func (s *_geoDecayFunction) DecayFunctionBaseGeoLocationDistance(decayfunctionbasegeolocationdistance map[string]types.DecayPlacementGeoLocationDistance) *_geoDecayFunction { + + s.v.DecayFunctionBaseGeoLocationDistance = decayfunctionbasegeolocationdistance + return s +} + +func (s *_geoDecayFunction) AddDecayFunctionBaseGeoLocationDistance(key string, value types.DecayPlacementGeoLocationDistanceVariant) *_geoDecayFunction { + + var tmp map[string]types.DecayPlacementGeoLocationDistance + if s.v.DecayFunctionBaseGeoLocationDistance == nil { + s.v.DecayFunctionBaseGeoLocationDistance = make(map[string]types.DecayPlacementGeoLocationDistance) + } else { + tmp = s.v.DecayFunctionBaseGeoLocationDistance + } + + tmp[key] = *value.DecayPlacementGeoLocationDistanceCaster() + + s.v.DecayFunctionBaseGeoLocationDistance = tmp + return s +} + +// Determines how the distance is calculated when a field used for computing the +// decay contains multiple values. +func (s *_geoDecayFunction) MultiValueMode(multivaluemode multivaluemode.MultiValueMode) *_geoDecayFunction { + + s.v.MultiValueMode = &multivaluemode + return s +} + +func (s *_geoDecayFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.Linear = s.v + + return container +} + +func (s *_geoDecayFunction) GeoDecayFunctionCaster() *types.GeoDecayFunction { + return s.v +} diff --git a/typedapi/esdsl/geodistanceaggregation.go b/typedapi/esdsl/geodistanceaggregation.go new file mode 100644 index 0000000000..9364c3fad3 --- /dev/null +++ b/typedapi/esdsl/geodistanceaggregation.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" +) + +type _geoDistanceAggregation struct { + v *types.GeoDistanceAggregation +} + +// A multi-bucket aggregation that works on `geo_point` fields. +// Evaluates the distance of each document value from an origin point and +// determines the buckets it belongs to, based on ranges defined in the request. +func NewGeoDistanceAggregation() *_geoDistanceAggregation { + + return &_geoDistanceAggregation{v: types.NewGeoDistanceAggregation()} + +} + +// The distance calculation type. +func (s *_geoDistanceAggregation) DistanceType(distancetype geodistancetype.GeoDistanceType) *_geoDistanceAggregation { + + s.v.DistanceType = &distancetype + return s +} + +// A field of type `geo_point` used to evaluate the distance. +func (s *_geoDistanceAggregation) Field(field string) *_geoDistanceAggregation { + + s.v.Field = &field + + return s +} + +// The origin used to evaluate the distance. 
+func (s *_geoDistanceAggregation) Origin(geolocation types.GeoLocationVariant) *_geoDistanceAggregation { + + s.v.Origin = *geolocation.GeoLocationCaster() + + return s +} + +// An array of ranges used to bucket documents. +func (s *_geoDistanceAggregation) Ranges(ranges ...types.AggregationRangeVariant) *_geoDistanceAggregation { + + for _, v := range ranges { + + s.v.Ranges = append(s.v.Ranges, *v.AggregationRangeCaster()) + + } + return s +} + +// The distance unit. +func (s *_geoDistanceAggregation) Unit(unit distanceunit.DistanceUnit) *_geoDistanceAggregation { + + s.v.Unit = &unit + return s +} + +func (s *_geoDistanceAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeoDistance = s.v + + return container +} + +func (s *_geoDistanceAggregation) GeoDistanceAggregationCaster() *types.GeoDistanceAggregation { + return s.v +} diff --git a/typedapi/esdsl/geodistancefeaturequery.go b/typedapi/esdsl/geodistancefeaturequery.go new file mode 100644 index 0000000000..9532585d1d --- /dev/null +++ b/typedapi/esdsl/geodistancefeaturequery.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoDistanceFeatureQuery struct { + v *types.GeoDistanceFeatureQuery +} + +// Boosts the relevance score of documents closer to a provided origin date or +// point. +// For example, you can use this query to give more weight to documents closer +// to a certain date or location. +func NewGeoDistanceFeatureQuery() *_geoDistanceFeatureQuery { + + return &_geoDistanceFeatureQuery{v: types.NewGeoDistanceFeatureQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_geoDistanceFeatureQuery) Boost(boost float32) *_geoDistanceFeatureQuery { + + s.v.Boost = &boost + + return s +} + +// Name of the field used to calculate distances. This field must meet the +// following criteria: +// be a `date`, `date_nanos` or `geo_point` field; +// have an `index` mapping parameter value of `true`, which is the default; +// have an `doc_values` mapping parameter value of `true`, which is the default. +func (s *_geoDistanceFeatureQuery) Field(field string) *_geoDistanceFeatureQuery { + + s.v.Field = field + + return s +} + +// Date or point of origin used to calculate distances. +// If the `field` value is a `date` or `date_nanos` field, the `origin` value +// must be a date. +// Date Math, such as `now-1h`, is supported. +// If the field value is a `geo_point` field, the `origin` value must be a +// geopoint. 
+func (s *_geoDistanceFeatureQuery) Origin(geolocation types.GeoLocationVariant) *_geoDistanceFeatureQuery { + + s.v.Origin = *geolocation.GeoLocationCaster() + + return s +} + +// Distance from the `origin` at which relevance scores receive half of the +// `boost` value. +// If the `field` value is a `date` or `date_nanos` field, the `pivot` value +// must be a time unit, such as `1h` or `10d`. If the `field` value is a +// `geo_point` field, the `pivot` value must be a distance unit, such as `1km` +// or `12m`. +func (s *_geoDistanceFeatureQuery) Pivot(distance string) *_geoDistanceFeatureQuery { + + s.v.Pivot = distance + + return s +} + +func (s *_geoDistanceFeatureQuery) QueryName_(queryname_ string) *_geoDistanceFeatureQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_geoDistanceFeatureQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.DistanceFeature = s.v + + return container +} + +func (s *_geoDistanceFeatureQuery) GeoDistanceFeatureQueryCaster() *types.GeoDistanceFeatureQuery { + return s.v +} diff --git a/typedapi/esdsl/geodistancequery.go b/typedapi/esdsl/geodistancequery.go new file mode 100644 index 0000000000..53d194e408 --- /dev/null +++ b/typedapi/esdsl/geodistancequery.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" +) + +type _geoDistanceQuery struct { + v *types.GeoDistanceQuery +} + +// Matches `geo_point` and `geo_shape` values within a given distance of a +// geopoint. +func NewGeoDistanceQuery() *_geoDistanceQuery { + + return &_geoDistanceQuery{v: types.NewGeoDistanceQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_geoDistanceQuery) Boost(boost float32) *_geoDistanceQuery { + + s.v.Boost = &boost + + return s +} + +// The radius of the circle centred on the specified location. +// Points which fall into this circle are considered to be matches. +func (s *_geoDistanceQuery) Distance(distance string) *_geoDistanceQuery { + + s.v.Distance = distance + + return s +} + +// How to compute the distance. +// Set to `plane` for a faster calculation that's inaccurate on long distances +// and close to the poles. 
+func (s *_geoDistanceQuery) DistanceType(distancetype geodistancetype.GeoDistanceType) *_geoDistanceQuery { + + s.v.DistanceType = &distancetype + return s +} + +func (s *_geoDistanceQuery) GeoDistanceQuery(geodistancequery map[string]types.GeoLocation) *_geoDistanceQuery { + + s.v.GeoDistanceQuery = geodistancequery + return s +} + +func (s *_geoDistanceQuery) AddGeoDistanceQuery(key string, value types.GeoLocationVariant) *_geoDistanceQuery { + + var tmp map[string]types.GeoLocation + if s.v.GeoDistanceQuery == nil { + s.v.GeoDistanceQuery = make(map[string]types.GeoLocation) + } else { + tmp = s.v.GeoDistanceQuery + } + + tmp[key] = *value.GeoLocationCaster() + + s.v.GeoDistanceQuery = tmp + return s +} + +// Set to `true` to ignore an unmapped field and not match any documents for +// this query. +// Set to `false` to throw an exception if the field is not mapped. +func (s *_geoDistanceQuery) IgnoreUnmapped(ignoreunmapped bool) *_geoDistanceQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_geoDistanceQuery) QueryName_(queryname_ string) *_geoDistanceQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or +// longitude. +// Set to `COERCE` to also try to infer correct latitude or longitude. +func (s *_geoDistanceQuery) ValidationMethod(validationmethod geovalidationmethod.GeoValidationMethod) *_geoDistanceQuery { + + s.v.ValidationMethod = &validationmethod + return s +} + +func (s *_geoDistanceQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.GeoDistance = s.v + + return container +} + +func (s *_geoDistanceQuery) GeoDistanceQueryCaster() *types.GeoDistanceQuery { + return s.v +} diff --git a/typedapi/esdsl/geodistancesort.go b/typedapi/esdsl/geodistancesort.go new file mode 100644 index 0000000000..bf5cdd8fcf --- /dev/null +++ b/typedapi/esdsl/geodistancesort.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _geoDistanceSort struct { + v *types.GeoDistanceSort +} + +func NewGeoDistanceSort() *_geoDistanceSort { + + return &_geoDistanceSort{v: types.NewGeoDistanceSort()} + +} + +func (s *_geoDistanceSort) DistanceType(distancetype geodistancetype.GeoDistanceType) *_geoDistanceSort { + + s.v.DistanceType = &distancetype + return s +} + +func (s *_geoDistanceSort) GeoDistanceSort(geodistancesort map[string][]types.GeoLocation) *_geoDistanceSort { + + s.v.GeoDistanceSort = geodistancesort + return s +} + +func (s *_geoDistanceSort) IgnoreUnmapped(ignoreunmapped bool) *_geoDistanceSort { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func 
(s *_geoDistanceSort) Mode(mode sortmode.SortMode) *_geoDistanceSort { + + s.v.Mode = &mode + return s +} + +func (s *_geoDistanceSort) Nested(nested types.NestedSortValueVariant) *_geoDistanceSort { + + s.v.Nested = nested.NestedSortValueCaster() + + return s +} + +func (s *_geoDistanceSort) Order(order sortorder.SortOrder) *_geoDistanceSort { + + s.v.Order = &order + return s +} + +func (s *_geoDistanceSort) Unit(unit distanceunit.DistanceUnit) *_geoDistanceSort { + + s.v.Unit = &unit + return s +} + +func (s *_geoDistanceSort) SortOptionsCaster() *types.SortOptions { + container := types.NewSortOptions() + + container.GeoDistance_ = s.v + + return container +} + +func (s *_geoDistanceSort) GeoDistanceSortCaster() *types.GeoDistanceSort { + return s.v +} diff --git a/typedapi/esdsl/geogridprocessor.go b/typedapi/esdsl/geogridprocessor.go new file mode 100644 index 0000000000..0881749563 --- /dev/null +++ b/typedapi/esdsl/geogridprocessor.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geogridtargetformat" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geogridtiletype" +) + +type _geoGridProcessor struct { + v *types.GeoGridProcessor +} + +// Converts geo-grid definitions of grid tiles or cells to regular bounding +// boxes or polygons which describe their shape. +// This is useful if there is a need to interact with the tile shapes as +// spatially indexable fields. +func NewGeoGridProcessor(field string, tiletype geogridtiletype.GeoGridTileType) *_geoGridProcessor { + + tmp := &_geoGridProcessor{v: types.NewGeoGridProcessor()} + + tmp.Field(field) + + tmp.TileType(tiletype) + + return tmp + +} + +// If specified and children tiles exist, save those tile addresses to this +// field as an array of strings. +func (s *_geoGridProcessor) ChildrenField(field string) *_geoGridProcessor { + + s.v.ChildrenField = &field + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_geoGridProcessor) Description(description string) *_geoGridProcessor { + + s.v.Description = &description + + return s +} + +// The field to interpret as a geo-tile.= +// The field format is determined by the `tile_type`. +func (s *_geoGridProcessor) Field(field string) *_geoGridProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_geoGridProcessor) If(if_ types.ScriptVariant) *_geoGridProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_geoGridProcessor) IgnoreFailure(ignorefailure bool) *_geoGridProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_geoGridProcessor) IgnoreMissing(ignoremissing bool) *_geoGridProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// If specified and intersecting non-child tiles exist, save their addresses to +// this field as an array of strings. +func (s *_geoGridProcessor) NonChildrenField(field string) *_geoGridProcessor { + + s.v.NonChildrenField = &field + + return s +} + +// Handle failures for the processor. +func (s *_geoGridProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_geoGridProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// If specified and a parent tile exists, save that tile address to this field. +func (s *_geoGridProcessor) ParentField(field string) *_geoGridProcessor { + + s.v.ParentField = &field + + return s +} + +// If specified, save the tile precision (zoom) as an integer to this field. +func (s *_geoGridProcessor) PrecisionField(field string) *_geoGridProcessor { + + s.v.PrecisionField = &field + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_geoGridProcessor) Tag(tag string) *_geoGridProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the polygon shape to, by default, the `field` is updated +// in-place. +func (s *_geoGridProcessor) TargetField(field string) *_geoGridProcessor { + + s.v.TargetField = &field + + return s +} + +// Which format to save the generated polygon in. 
+func (s *_geoGridProcessor) TargetFormat(targetformat geogridtargetformat.GeoGridTargetFormat) *_geoGridProcessor { + + s.v.TargetFormat = &targetformat + return s +} + +// Three tile formats are understood: geohash, geotile and geohex. +func (s *_geoGridProcessor) TileType(tiletype geogridtiletype.GeoGridTileType) *_geoGridProcessor { + + s.v.TileType = tiletype + return s +} + +func (s *_geoGridProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.GeoGrid = s.v + + return container +} + +func (s *_geoGridProcessor) GeoGridProcessorCaster() *types.GeoGridProcessor { + return s.v +} diff --git a/typedapi/esdsl/geogridquery.go b/typedapi/esdsl/geogridquery.go new file mode 100644 index 0000000000..f3c181fb4a --- /dev/null +++ b/typedapi/esdsl/geogridquery.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _geoGridQuery struct { + v *types.GeoGridQuery +} + +func NewGeoGridQuery() *_geoGridQuery { + return &_geoGridQuery{v: types.NewGeoGridQuery()} +} + +// AdditionalGeoGridQueryProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_geoGridQuery) AdditionalGeoGridQueryProperty(key string, value json.RawMessage) *_geoGridQuery { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalGeoGridQueryProperty = tmp + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_geoGridQuery) Boost(boost float32) *_geoGridQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_geoGridQuery) Geogrid(geotile string) *_geoGridQuery { + + s.v.Geogrid = &geotile + + return s +} + +func (s *_geoGridQuery) Geohash(geohash string) *_geoGridQuery { + + s.v.Geohash = &geohash + + return s +} + +func (s *_geoGridQuery) Geohex(geohexcell string) *_geoGridQuery { + + s.v.Geohex = &geohexcell + + return s +} + +func (s *_geoGridQuery) QueryName_(queryname_ string) *_geoGridQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_geoGridQuery) GeoGridQueryCaster() *types.GeoGridQuery { + return s.v +} diff --git a/typedapi/esdsl/geohashgridaggregation.go b/typedapi/esdsl/geohashgridaggregation.go new file mode 100644 index 0000000000..38e1bb3b23 --- /dev/null +++ b/typedapi/esdsl/geohashgridaggregation.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoHashGridAggregation struct { + v *types.GeoHashGridAggregation +} + +// A multi-bucket aggregation that groups `geo_point` and `geo_shape` values +// into buckets that represent a grid. +// Each cell is labeled using a geohash which is of user-definable precision. +func NewGeoHashGridAggregation() *_geoHashGridAggregation { + + return &_geoHashGridAggregation{v: types.NewGeoHashGridAggregation()} + +} + +// The bounding box to filter the points in each bucket. +func (s *_geoHashGridAggregation) Bounds(geobounds types.GeoBoundsVariant) *_geoHashGridAggregation { + + s.v.Bounds = *geobounds.GeoBoundsCaster() + + return s +} + +// Field containing indexed `geo_point` or `geo_shape` values. +// If the field contains an array, `geohash_grid` aggregates all array values. +func (s *_geoHashGridAggregation) Field(field string) *_geoHashGridAggregation { + + s.v.Field = &field + + return s +} + +// The string length of the geohashes used to define cells/buckets in the +// results. 
+func (s *_geoHashGridAggregation) Precision(geohashprecision types.GeoHashPrecisionVariant) *_geoHashGridAggregation { + + s.v.Precision = *geohashprecision.GeoHashPrecisionCaster() + + return s +} + +// Allows for more accurate counting of the top cells returned in the final +// result the aggregation. +// Defaults to returning `max(10,(size x number-of-shards))` buckets from each +// shard. +func (s *_geoHashGridAggregation) ShardSize(shardsize int) *_geoHashGridAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// The maximum number of geohash buckets to return. +func (s *_geoHashGridAggregation) Size(size int) *_geoHashGridAggregation { + + s.v.Size = &size + + return s +} + +func (s *_geoHashGridAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeohashGrid = s.v + + return container +} + +func (s *_geoHashGridAggregation) GeoHashGridAggregationCaster() *types.GeoHashGridAggregation { + return s.v +} diff --git a/typedapi/esdsl/geohashlocation.go b/typedapi/esdsl/geohashlocation.go new file mode 100644 index 0000000000..327914444b --- /dev/null +++ b/typedapi/esdsl/geohashlocation.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoHashLocation struct { + v *types.GeoHashLocation +} + +func NewGeoHashLocation() *_geoHashLocation { + + return &_geoHashLocation{v: types.NewGeoHashLocation()} + +} + +func (s *_geoHashLocation) Geohash(geohash string) *_geoHashLocation { + + s.v.Geohash = geohash + + return s +} + +func (s *_geoHashLocation) GeoHashLocationCaster() *types.GeoHashLocation { + return s.v +} diff --git a/typedapi/esdsl/geohashprecision.go b/typedapi/esdsl/geohashprecision.go new file mode 100644 index 0000000000..4c67cd727e --- /dev/null +++ b/typedapi/esdsl/geohashprecision.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _geoHashPrecision struct { + v types.GeoHashPrecision +} + +func NewGeoHashPrecision() *_geoHashPrecision { + return &_geoHashPrecision{v: nil} +} + +func (u *_geoHashPrecision) Int(int int) *_geoHashPrecision { + + u.v = &int + + return u +} + +func (u *_geoHashPrecision) String(string string) *_geoHashPrecision { + + u.v = &string + + return u +} + +func (u *_geoHashPrecision) GeoHashPrecisionCaster() *types.GeoHashPrecision { + return &u.v +} diff --git a/typedapi/esdsl/geohexgridaggregation.go b/typedapi/esdsl/geohexgridaggregation.go new file mode 100644 index 0000000000..7310983b67 --- /dev/null +++ b/typedapi/esdsl/geohexgridaggregation.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geohexGridAggregation struct { + v *types.GeohexGridAggregation +} + +// A multi-bucket aggregation that groups `geo_point` and `geo_shape` values +// into buckets that represent a grid. 
+// Each cell corresponds to a H3 cell index and is labeled using the H3Index +// representation. +func NewGeohexGridAggregation() *_geohexGridAggregation { + + return &_geohexGridAggregation{v: types.NewGeohexGridAggregation()} + +} + +// Bounding box used to filter the geo-points in each bucket. +func (s *_geohexGridAggregation) Bounds(geobounds types.GeoBoundsVariant) *_geohexGridAggregation { + + s.v.Bounds = *geobounds.GeoBoundsCaster() + + return s +} + +// Field containing indexed `geo_point` or `geo_shape` values. +// If the field contains an array, `geohex_grid` aggregates all array values. +func (s *_geohexGridAggregation) Field(field string) *_geohexGridAggregation { + + s.v.Field = field + + return s +} + +// Integer zoom of the key used to defined cells or buckets +// in the results. Value should be between 0-15. +func (s *_geohexGridAggregation) Precision(precision int) *_geohexGridAggregation { + + s.v.Precision = &precision + + return s +} + +// Number of buckets returned from each shard. +func (s *_geohexGridAggregation) ShardSize(shardsize int) *_geohexGridAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// Maximum number of buckets to return. +func (s *_geohexGridAggregation) Size(size int) *_geohexGridAggregation { + + s.v.Size = &size + + return s +} + +func (s *_geohexGridAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeohexGrid = s.v + + return container +} + +func (s *_geohexGridAggregation) GeohexGridAggregationCaster() *types.GeohexGridAggregation { + return s.v +} diff --git a/typedapi/esdsl/geoipprocessor.go b/typedapi/esdsl/geoipprocessor.go new file mode 100644 index 0000000000..1ac847adaa --- /dev/null +++ b/typedapi/esdsl/geoipprocessor.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoIpProcessor struct { + v *types.GeoIpProcessor +} + +// The `geoip` processor adds information about the geographical location of an +// IPv4 or IPv6 address. +func NewGeoIpProcessor() *_geoIpProcessor { + + return &_geoIpProcessor{v: types.NewGeoIpProcessor()} + +} + +// The database filename referring to a database the module ships with +// (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom +// database in the ingest-geoip config directory. +func (s *_geoIpProcessor) DatabaseFile(databasefile string) *_geoIpProcessor { + + s.v.DatabaseFile = &databasefile + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_geoIpProcessor) Description(description string) *_geoIpProcessor { + + s.v.Description = &description + + return s +} + +// If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the +// missing database is downloaded when the pipeline is created. +// Else, the download is triggered by when the pipeline is used as the +// `default_pipeline` or `final_pipeline` in an index. 
+func (s *_geoIpProcessor) DownloadDatabaseOnPipelineCreation(downloaddatabaseonpipelinecreation bool) *_geoIpProcessor { + + s.v.DownloadDatabaseOnPipelineCreation = &downloaddatabaseonpipelinecreation + + return s +} + +// The field to get the ip address from for the geographical lookup. +func (s *_geoIpProcessor) Field(field string) *_geoIpProcessor { + + s.v.Field = field + + return s +} + +// If `true`, only the first found geoip data will be returned, even if the +// field contains an array. +func (s *_geoIpProcessor) FirstOnly(firstonly bool) *_geoIpProcessor { + + s.v.FirstOnly = &firstonly + + return s +} + +// Conditionally execute the processor. +func (s *_geoIpProcessor) If(if_ types.ScriptVariant) *_geoIpProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_geoIpProcessor) IgnoreFailure(ignorefailure bool) *_geoIpProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_geoIpProcessor) IgnoreMissing(ignoremissing bool) *_geoIpProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_geoIpProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_geoIpProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Controls what properties are added to the `target_field` based on the geoip +// lookup. +func (s *_geoIpProcessor) Properties(properties ...string) *_geoIpProcessor { + + for _, v := range properties { + + s.v.Properties = append(s.v.Properties, v) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_geoIpProcessor) Tag(tag string) *_geoIpProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that will hold the geographical information looked up from the +// MaxMind database. +func (s *_geoIpProcessor) TargetField(field string) *_geoIpProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_geoIpProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Geoip = s.v + + return container +} + +func (s *_geoIpProcessor) GeoIpProcessorCaster() *types.GeoIpProcessor { + return s.v +} diff --git a/typedapi/esdsl/geolineaggregation.go b/typedapi/esdsl/geolineaggregation.go new file mode 100644 index 0000000000..c7f02199c3 --- /dev/null +++ b/typedapi/esdsl/geolineaggregation.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _geoLineAggregation struct { + v *types.GeoLineAggregation +} + +func NewGeoLineAggregation(point types.GeoLinePointVariant, sort types.GeoLineSortVariant) *_geoLineAggregation { + + tmp := &_geoLineAggregation{v: types.NewGeoLineAggregation()} + + tmp.Point(point) + + tmp.Sort(sort) + + return tmp + +} + +// When `true`, returns an additional array of the sort values in the feature +// properties. +func (s *_geoLineAggregation) IncludeSort(includesort bool) *_geoLineAggregation { + + s.v.IncludeSort = &includesort + + return s +} + +// The name of the geo_point field. +func (s *_geoLineAggregation) Point(point types.GeoLinePointVariant) *_geoLineAggregation { + + s.v.Point = *point.GeoLinePointCaster() + + return s +} + +// The maximum length of the line represented in the aggregation. +// Valid sizes are between 1 and 10000. +func (s *_geoLineAggregation) Size(size int) *_geoLineAggregation { + + s.v.Size = &size + + return s +} + +// The name of the numeric field to use as the sort key for ordering the points. +// When the `geo_line` aggregation is nested inside a `time_series` aggregation, +// this field defaults to `@timestamp`, and any other value will result in +// error. +func (s *_geoLineAggregation) Sort(sort types.GeoLineSortVariant) *_geoLineAggregation { + + s.v.Sort = *sort.GeoLineSortCaster() + + return s +} + +// The order in which the line is sorted (ascending or descending). 
+func (s *_geoLineAggregation) SortOrder(sortorder sortorder.SortOrder) *_geoLineAggregation { + + s.v.SortOrder = &sortorder + return s +} + +func (s *_geoLineAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Line = s.v + + return container +} + +func (s *_geoLineAggregation) GeoLineAggregationCaster() *types.GeoLineAggregation { + return s.v +} diff --git a/typedapi/esdsl/geolinepoint.go b/typedapi/esdsl/geolinepoint.go new file mode 100644 index 0000000000..4afc5e0b6c --- /dev/null +++ b/typedapi/esdsl/geolinepoint.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoLinePoint struct { + v *types.GeoLinePoint +} + +func NewGeoLinePoint() *_geoLinePoint { + + return &_geoLinePoint{v: types.NewGeoLinePoint()} + +} + +// The name of the geo_point field. 
+func (s *_geoLinePoint) Field(field string) *_geoLinePoint { + + s.v.Field = field + + return s +} + +func (s *_geoLinePoint) GeoLinePointCaster() *types.GeoLinePoint { + return s.v +} diff --git a/typedapi/esdsl/geolinesort.go b/typedapi/esdsl/geolinesort.go new file mode 100644 index 0000000000..d2797f0e10 --- /dev/null +++ b/typedapi/esdsl/geolinesort.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoLineSort struct { + v *types.GeoLineSort +} + +func NewGeoLineSort() *_geoLineSort { + + return &_geoLineSort{v: types.NewGeoLineSort()} + +} + +// The name of the numeric field to use as the sort key for ordering the points. 
+func (s *_geoLineSort) Field(field string) *_geoLineSort { + + s.v.Field = field + + return s +} + +func (s *_geoLineSort) GeoLineSortCaster() *types.GeoLineSort { + return s.v +} diff --git a/typedapi/esdsl/geolocation.go b/typedapi/esdsl/geolocation.go new file mode 100644 index 0000000000..284e37feb4 --- /dev/null +++ b/typedapi/esdsl/geolocation.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _geoLocation struct { + v types.GeoLocation +} + +func NewGeoLocation() *_geoLocation { + return &_geoLocation{v: nil} +} + +func (u *_geoLocation) LatLonGeoLocation(latlongeolocation types.LatLonGeoLocationVariant) *_geoLocation { + + u.v = &latlongeolocation + + return u +} + +// Interface implementation for LatLonGeoLocation in GeoLocation union +func (u *_latLonGeoLocation) GeoLocationCaster() *types.GeoLocation { + t := types.GeoLocation(u.v) + return &t +} + +func (u *_geoLocation) GeoHashLocation(geohashlocation types.GeoHashLocationVariant) *_geoLocation { + + u.v = &geohashlocation + + return u +} + +// Interface implementation for GeoHashLocation in GeoLocation union +func (u *_geoHashLocation) GeoLocationCaster() *types.GeoLocation { + t := types.GeoLocation(u.v) + return &t +} + +func (u *_geoLocation) Doubles(doubles ...types.Float64) *_geoLocation { + + u.v = make([]types.Float64, len(doubles)) + u.v = doubles + + return u +} + +func (u *_geoLocation) String(string string) *_geoLocation { + + u.v = &string + + return u +} + +func (u *_geoLocation) GeoLocationCaster() *types.GeoLocation { + return &u.v +} diff --git a/typedapi/esdsl/geopointproperty.go b/typedapi/esdsl/geopointproperty.go new file mode 100644 index 0000000000..118f110155 --- /dev/null +++ b/typedapi/esdsl/geopointproperty.go @@ -0,0 +1,195 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _geoPointProperty struct { + v *types.GeoPointProperty +} + +func NewGeoPointProperty() *_geoPointProperty { + + return &_geoPointProperty{v: types.NewGeoPointProperty()} + +} + +func (s *_geoPointProperty) CopyTo(fields ...string) *_geoPointProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_geoPointProperty) DocValues(docvalues bool) *_geoPointProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_geoPointProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_geoPointProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_geoPointProperty) Fields(fields map[string]types.Property) *_geoPointProperty { + + s.v.Fields = fields + return s +} + +func (s *_geoPointProperty) AddField(key string, value types.PropertyVariant) *_geoPointProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_geoPointProperty) 
IgnoreAbove(ignoreabove int) *_geoPointProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_geoPointProperty) IgnoreMalformed(ignoremalformed bool) *_geoPointProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_geoPointProperty) IgnoreZValue(ignorezvalue bool) *_geoPointProperty { + + s.v.IgnoreZValue = &ignorezvalue + + return s +} + +func (s *_geoPointProperty) Index(index bool) *_geoPointProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_geoPointProperty) Meta(meta map[string]string) *_geoPointProperty { + + s.v.Meta = meta + return s +} + +func (s *_geoPointProperty) AddMeta(key string, value string) *_geoPointProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_geoPointProperty) NullValue(geolocation types.GeoLocationVariant) *_geoPointProperty { + + s.v.NullValue = *geolocation.GeoLocationCaster() + + return s +} + +func (s *_geoPointProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_geoPointProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_geoPointProperty) Properties(properties map[string]types.Property) *_geoPointProperty { + + s.v.Properties = properties + return s +} + +func (s *_geoPointProperty) AddProperty(key string, value types.PropertyVariant) *_geoPointProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_geoPointProperty) Script(script types.ScriptVariant) *_geoPointProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_geoPointProperty) Store(store bool) *_geoPointProperty { + + s.v.Store = &store + + return s +} + +func (s 
*_geoPointProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_geoPointProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_geoPointProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_geoPointProperty) GeoPointPropertyCaster() *types.GeoPointProperty { + return s.v +} diff --git a/typedapi/esdsl/geopolygonpoints.go b/typedapi/esdsl/geopolygonpoints.go new file mode 100644 index 0000000000..992f01c384 --- /dev/null +++ b/typedapi/esdsl/geopolygonpoints.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoPolygonPoints struct { + v *types.GeoPolygonPoints +} + +func NewGeoPolygonPoints() *_geoPolygonPoints { + + return &_geoPolygonPoints{v: types.NewGeoPolygonPoints()} + +} + +func (s *_geoPolygonPoints) Points(points ...types.GeoLocationVariant) *_geoPolygonPoints { + + for _, v := range points { + + s.v.Points = append(s.v.Points, *v.GeoLocationCaster()) + + } + return s +} + +func (s *_geoPolygonPoints) GeoPolygonPointsCaster() *types.GeoPolygonPoints { + return s.v +} diff --git a/typedapi/esdsl/geopolygonquery.go b/typedapi/esdsl/geopolygonquery.go new file mode 100644 index 0000000000..d691a80106 --- /dev/null +++ b/typedapi/esdsl/geopolygonquery.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" +) + +type _geoPolygonQuery struct { + v *types.GeoPolygonQuery +} + +func NewGeoPolygonQuery() *_geoPolygonQuery { + + return &_geoPolygonQuery{v: types.NewGeoPolygonQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_geoPolygonQuery) Boost(boost float32) *_geoPolygonQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_geoPolygonQuery) GeoPolygonQuery(geopolygonquery map[string]types.GeoPolygonPoints) *_geoPolygonQuery { + + s.v.GeoPolygonQuery = geopolygonquery + return s +} + +func (s *_geoPolygonQuery) AddGeoPolygonQuery(key string, value types.GeoPolygonPointsVariant) *_geoPolygonQuery { + + var tmp map[string]types.GeoPolygonPoints + if s.v.GeoPolygonQuery == nil { + s.v.GeoPolygonQuery = make(map[string]types.GeoPolygonPoints) + } else { + tmp = s.v.GeoPolygonQuery + } + + tmp[key] = *value.GeoPolygonPointsCaster() + + s.v.GeoPolygonQuery = tmp + return s +} + +func (s *_geoPolygonQuery) IgnoreUnmapped(ignoreunmapped bool) *_geoPolygonQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_geoPolygonQuery) QueryName_(queryname_ string) *_geoPolygonQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_geoPolygonQuery) ValidationMethod(validationmethod geovalidationmethod.GeoValidationMethod) *_geoPolygonQuery { + + s.v.ValidationMethod = &validationmethod + return s +} + +func (s *_geoPolygonQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.GeoPolygon = s.v + + 
return container +} + +func (s *_geoPolygonQuery) GeoPolygonQueryCaster() *types.GeoPolygonQuery { + return s.v +} diff --git a/typedapi/esdsl/geoshapefieldquery.go b/typedapi/esdsl/geoshapefieldquery.go new file mode 100644 index 0000000000..1c8039d912 --- /dev/null +++ b/typedapi/esdsl/geoshapefieldquery.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" +) + +type _geoShapeFieldQuery struct { + v *types.GeoShapeFieldQuery +} + +func NewGeoShapeFieldQuery() *_geoShapeFieldQuery { + + return &_geoShapeFieldQuery{v: types.NewGeoShapeFieldQuery()} + +} + +// Query using an indexed shape retrieved from the the specified document and +// path. 
+func (s *_geoShapeFieldQuery) IndexedShape(indexedshape types.FieldLookupVariant) *_geoShapeFieldQuery { + + s.v.IndexedShape = indexedshape.FieldLookupCaster() + + return s +} + +// Spatial relation operator used to search a geo field. +func (s *_geoShapeFieldQuery) Relation(relation geoshaperelation.GeoShapeRelation) *_geoShapeFieldQuery { + + s.v.Relation = &relation + return s +} + +func (s *_geoShapeFieldQuery) Shape(geoshape json.RawMessage) *_geoShapeFieldQuery { + + s.v.Shape = geoshape + + return s +} + +func (s *_geoShapeFieldQuery) GeoShapeFieldQueryCaster() *types.GeoShapeFieldQuery { + return s.v +} diff --git a/typedapi/esdsl/geoshapeproperty.go b/typedapi/esdsl/geoshapeproperty.go new file mode 100644 index 0000000000..07022047b0 --- /dev/null +++ b/typedapi/esdsl/geoshapeproperty.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _geoShapeProperty struct { + v *types.GeoShapeProperty +} + +func NewGeoShapeProperty() *_geoShapeProperty { + + return &_geoShapeProperty{v: types.NewGeoShapeProperty()} + +} + +func (s *_geoShapeProperty) Coerce(coerce bool) *_geoShapeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_geoShapeProperty) CopyTo(fields ...string) *_geoShapeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_geoShapeProperty) DocValues(docvalues bool) *_geoShapeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_geoShapeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_geoShapeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_geoShapeProperty) Fields(fields map[string]types.Property) *_geoShapeProperty { + + s.v.Fields = fields + return s +} + +func (s *_geoShapeProperty) AddField(key string, value types.PropertyVariant) *_geoShapeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_geoShapeProperty) IgnoreAbove(ignoreabove int) *_geoShapeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_geoShapeProperty) IgnoreMalformed(ignoremalformed bool) *_geoShapeProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_geoShapeProperty) IgnoreZValue(ignorezvalue bool) 
*_geoShapeProperty { + + s.v.IgnoreZValue = &ignorezvalue + + return s +} + +// Metadata about the field. +func (s *_geoShapeProperty) Meta(meta map[string]string) *_geoShapeProperty { + + s.v.Meta = meta + return s +} + +func (s *_geoShapeProperty) AddMeta(key string, value string) *_geoShapeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_geoShapeProperty) Orientation(orientation geoorientation.GeoOrientation) *_geoShapeProperty { + + s.v.Orientation = &orientation + return s +} + +func (s *_geoShapeProperty) Properties(properties map[string]types.Property) *_geoShapeProperty { + + s.v.Properties = properties + return s +} + +func (s *_geoShapeProperty) AddProperty(key string, value types.PropertyVariant) *_geoShapeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_geoShapeProperty) Store(store bool) *_geoShapeProperty { + + s.v.Store = &store + + return s +} + +func (s *_geoShapeProperty) Strategy(strategy geostrategy.GeoStrategy) *_geoShapeProperty { + + s.v.Strategy = &strategy + return s +} + +func (s *_geoShapeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_geoShapeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_geoShapeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_geoShapeProperty) GeoShapePropertyCaster() *types.GeoShapeProperty { + return s.v +} diff --git a/typedapi/esdsl/geoshapequery.go b/typedapi/esdsl/geoshapequery.go new file mode 100644 index 0000000000..ff9228bc31 --- /dev/null +++ 
b/typedapi/esdsl/geoshapequery.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoShapeQuery struct { + v *types.GeoShapeQuery +} + +// Filter documents indexed using either the `geo_shape` or the `geo_point` +// type. +func NewGeoShapeQuery() *_geoShapeQuery { + + return &_geoShapeQuery{v: types.NewGeoShapeQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_geoShapeQuery) Boost(boost float32) *_geoShapeQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_geoShapeQuery) GeoShapeQuery(geoshapequery map[string]types.GeoShapeFieldQuery) *_geoShapeQuery { + + s.v.GeoShapeQuery = geoshapequery + return s +} + +func (s *_geoShapeQuery) AddGeoShapeQuery(key string, value types.GeoShapeFieldQueryVariant) *_geoShapeQuery { + + var tmp map[string]types.GeoShapeFieldQuery + if s.v.GeoShapeQuery == nil { + s.v.GeoShapeQuery = make(map[string]types.GeoShapeFieldQuery) + } else { + tmp = s.v.GeoShapeQuery + } + + tmp[key] = *value.GeoShapeFieldQueryCaster() + + s.v.GeoShapeQuery = tmp + return s +} + +// Set to `true` to ignore an unmapped field and not match any documents for +// this query. +// Set to `false` to throw an exception if the field is not mapped. +func (s *_geoShapeQuery) IgnoreUnmapped(ignoreunmapped bool) *_geoShapeQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_geoShapeQuery) QueryName_(queryname_ string) *_geoShapeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_geoShapeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.GeoShape = s.v + + return container +} + +func (s *_geoShapeQuery) GeoShapeQueryCaster() *types.GeoShapeQuery { + return s.v +} diff --git a/typedapi/esdsl/geotilegridaggregation.go b/typedapi/esdsl/geotilegridaggregation.go new file mode 100644 index 0000000000..15d2328afb --- /dev/null +++ b/typedapi/esdsl/geotilegridaggregation.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _geoTileGridAggregation struct { + v *types.GeoTileGridAggregation +} + +func NewGeoTileGridAggregation() *_geoTileGridAggregation { + + return &_geoTileGridAggregation{v: types.NewGeoTileGridAggregation()} + +} + +// A bounding box to filter the geo-points or geo-shapes in each bucket. +func (s *_geoTileGridAggregation) Bounds(geobounds types.GeoBoundsVariant) *_geoTileGridAggregation { + + s.v.Bounds = *geobounds.GeoBoundsCaster() + + return s +} + +// Field containing indexed `geo_point` or `geo_shape` values. +// If the field contains an array, `geotile_grid` aggregates all array values. +func (s *_geoTileGridAggregation) Field(field string) *_geoTileGridAggregation { + + s.v.Field = &field + + return s +} + +// Integer zoom of the key used to define cells/buckets in the results. +// Values outside of the range [0,29] will be rejected. +func (s *_geoTileGridAggregation) Precision(geotileprecision int) *_geoTileGridAggregation { + + s.v.Precision = &geotileprecision + + return s +} + +// Allows for more accurate counting of the top cells returned in the final +// result the aggregation. +// Defaults to returning `max(10,(size x number-of-shards))` buckets from each +// shard. 
+func (s *_geoTileGridAggregation) ShardSize(shardsize int) *_geoTileGridAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// The maximum number of buckets to return. +func (s *_geoTileGridAggregation) Size(size int) *_geoTileGridAggregation { + + s.v.Size = &size + + return s +} + +func (s *_geoTileGridAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.GeotileGrid = s.v + + return container +} + +func (s *_geoTileGridAggregation) PivotGroupByContainerCaster() *types.PivotGroupByContainer { + container := types.NewPivotGroupByContainer() + + container.GeotileGrid = s.v + + return container +} + +func (s *_geoTileGridAggregation) GeoTileGridAggregationCaster() *types.GeoTileGridAggregation { + return s.v +} diff --git a/typedapi/esdsl/germananalyzer.go b/typedapi/esdsl/germananalyzer.go new file mode 100644 index 0000000000..f1a534df2a --- /dev/null +++ b/typedapi/esdsl/germananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _germanAnalyzer struct { + v *types.GermanAnalyzer +} + +func NewGermanAnalyzer() *_germanAnalyzer { + + return &_germanAnalyzer{v: types.NewGermanAnalyzer()} + +} + +func (s *_germanAnalyzer) StemExclusion(stemexclusions ...string) *_germanAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_germanAnalyzer) Stopwords(stopwords ...string) *_germanAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_germanAnalyzer) StopwordsPath(stopwordspath string) *_germanAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_germanAnalyzer) GermanAnalyzerCaster() *types.GermanAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/globalaggregation.go b/typedapi/esdsl/globalaggregation.go new file mode 100644 index 0000000000..381876824b --- /dev/null +++ b/typedapi/esdsl/globalaggregation.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _globalAggregation struct { + v *types.GlobalAggregation +} + +// Defines a single bucket of all the documents within the search execution +// context. +// This context is defined by the indices and the document types you’re +// searching on, but is not influenced by the search query itself. +func NewGlobalAggregation() *_globalAggregation { + + return &_globalAggregation{v: types.NewGlobalAggregation()} + +} + +func (s *_globalAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Global = s.v + + return container +} + +func (s *_globalAggregation) GlobalAggregationCaster() *types.GlobalAggregation { + return s.v +} diff --git a/typedapi/esdsl/globalprivilege.go b/typedapi/esdsl/globalprivilege.go new file mode 100644 index 0000000000..249845e533 --- /dev/null +++ b/typedapi/esdsl/globalprivilege.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _globalPrivilege struct { + v *types.GlobalPrivilege +} + +func NewGlobalPrivilege(application types.ApplicationGlobalUserPrivilegesVariant) *_globalPrivilege { + + tmp := &_globalPrivilege{v: types.NewGlobalPrivilege()} + + tmp.Application(application) + + return tmp + +} + +func (s *_globalPrivilege) Application(application types.ApplicationGlobalUserPrivilegesVariant) *_globalPrivilege { + + s.v.Application = *application.ApplicationGlobalUserPrivilegesCaster() + + return s +} + +func (s *_globalPrivilege) GlobalPrivilegeCaster() *types.GlobalPrivilege { + return s.v +} diff --git a/typedapi/esdsl/googlenormalizeddistanceheuristic.go b/typedapi/esdsl/googlenormalizeddistanceheuristic.go new file mode 100644 index 0000000000..92e06789bc --- /dev/null +++ b/typedapi/esdsl/googlenormalizeddistanceheuristic.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _googleNormalizedDistanceHeuristic struct { + v *types.GoogleNormalizedDistanceHeuristic +} + +func NewGoogleNormalizedDistanceHeuristic() *_googleNormalizedDistanceHeuristic { + + return &_googleNormalizedDistanceHeuristic{v: types.NewGoogleNormalizedDistanceHeuristic()} + +} + +// Set to `false` if you defined a custom background filter that represents a +// different set of documents that you want to compare to. +func (s *_googleNormalizedDistanceHeuristic) BackgroundIsSuperset(backgroundissuperset bool) *_googleNormalizedDistanceHeuristic { + + s.v.BackgroundIsSuperset = &backgroundissuperset + + return s +} + +func (s *_googleNormalizedDistanceHeuristic) GoogleNormalizedDistanceHeuristicCaster() *types.GoogleNormalizedDistanceHeuristic { + return s.v +} diff --git a/typedapi/esdsl/grantapikey.go b/typedapi/esdsl/grantapikey.go new file mode 100644 index 0000000000..1452d2728d --- /dev/null +++ b/typedapi/esdsl/grantapikey.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _grantApiKey struct { + v *types.GrantApiKey +} + +func NewGrantApiKey() *_grantApiKey { + + return &_grantApiKey{v: types.NewGrantApiKey()} + +} + +// Expiration time for the API key. By default, API keys never expire. +func (s *_grantApiKey) Expiration(durationlarge string) *_grantApiKey { + + s.v.Expiration = &durationlarge + + return s +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports nested data structure. +// Within the `metadata` object, keys beginning with `_` are reserved for system +// usage. +func (s *_grantApiKey) Metadata(metadata types.MetadataVariant) *_grantApiKey { + + s.v.Metadata = *metadata.MetadataCaster() + + return s +} + +func (s *_grantApiKey) Name(name string) *_grantApiKey { + + s.v.Name = name + + return s +} + +// The role descriptors for this API key. +// When it is not specified or is an empty array, the API key has a point in +// time snapshot of permissions of the specified user or access token. +// If you supply role descriptors, the resultant permissions are an intersection +// of API keys permissions and the permissions of the user or access token. +func (s *_grantApiKey) RoleDescriptors(roledescriptors []map[string]types.RoleDescriptor) *_grantApiKey { + + s.v.RoleDescriptors = roledescriptors + + return s +} + +func (s *_grantApiKey) GrantApiKeyCaster() *types.GrantApiKey { + return s.v +} diff --git a/typedapi/esdsl/greaterthanvalidation.go b/typedapi/esdsl/greaterthanvalidation.go new file mode 100644 index 0000000000..e3d0841a4a --- /dev/null +++ b/typedapi/esdsl/greaterthanvalidation.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _greaterThanValidation struct { + v *types.GreaterThanValidation +} + +func NewGreaterThanValidation(constraint types.Float64) *_greaterThanValidation { + + tmp := &_greaterThanValidation{v: types.NewGreaterThanValidation()} + + tmp.Constraint(constraint) + + return tmp + +} + +func (s *_greaterThanValidation) Constraint(constraint types.Float64) *_greaterThanValidation { + + s.v.Constraint = constraint + + return s +} + +func (s *_greaterThanValidation) GreaterThanValidationCaster() *types.GreaterThanValidation { + return s.v +} diff --git a/typedapi/esdsl/greekanalyzer.go b/typedapi/esdsl/greekanalyzer.go new file mode 100644 index 0000000000..63a198d9a2 --- /dev/null +++ b/typedapi/esdsl/greekanalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _greekAnalyzer struct { + v *types.GreekAnalyzer +} + +func NewGreekAnalyzer() *_greekAnalyzer { + + return &_greekAnalyzer{v: types.NewGreekAnalyzer()} + +} + +func (s *_greekAnalyzer) Stopwords(stopwords ...string) *_greekAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_greekAnalyzer) StopwordsPath(stopwordspath string) *_greekAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_greekAnalyzer) GreekAnalyzerCaster() *types.GreekAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/grokprocessor.go b/typedapi/esdsl/grokprocessor.go new file mode 100644 index 0000000000..3d2864a660 --- /dev/null +++ b/typedapi/esdsl/grokprocessor.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _grokProcessor struct { + v *types.GrokProcessor +} + +// Extracts structured fields out of a single text field within a document. +// You choose which field to extract matched fields from, as well as the grok +// pattern you expect will match. +// A grok pattern is like a regular expression that supports aliased expressions +// that can be reused. +func NewGrokProcessor() *_grokProcessor { + + return &_grokProcessor{v: types.NewGrokProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_grokProcessor) Description(description string) *_grokProcessor { + + s.v.Description = &description + + return s +} + +// Must be disabled or v1. If v1, the processor uses patterns with Elastic +// Common Schema (ECS) field names. +func (s *_grokProcessor) EcsCompatibility(ecscompatibility string) *_grokProcessor { + + s.v.EcsCompatibility = &ecscompatibility + + return s +} + +// The field to use for grok expression parsing. +func (s *_grokProcessor) Field(field string) *_grokProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. 
+func (s *_grokProcessor) If(if_ types.ScriptVariant) *_grokProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_grokProcessor) IgnoreFailure(ignorefailure bool) *_grokProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_grokProcessor) IgnoreMissing(ignoremissing bool) *_grokProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_grokProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_grokProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// A map of pattern-name and pattern tuples defining custom patterns to be used +// by the current processor. +// Patterns matching existing names will override the pre-existing definition. +func (s *_grokProcessor) PatternDefinitions(patterndefinitions map[string]string) *_grokProcessor { + + s.v.PatternDefinitions = patterndefinitions + return s +} + +func (s *_grokProcessor) AddPatternDefinition(key string, value string) *_grokProcessor { + + var tmp map[string]string + if s.v.PatternDefinitions == nil { + s.v.PatternDefinitions = make(map[string]string) + } else { + tmp = s.v.PatternDefinitions + } + + tmp[key] = value + + s.v.PatternDefinitions = tmp + return s +} + +// An ordered list of grok expression to match and extract named captures with. +// Returns on the first expression in the list that matches. +func (s *_grokProcessor) Patterns(patterns ...string) *_grokProcessor { + + for _, v := range patterns { + + s.v.Patterns = append(s.v.Patterns, v) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_grokProcessor) Tag(tag string) *_grokProcessor { + + s.v.Tag = &tag + + return s +} + +// When `true`, `_ingest._grok_match_index` will be inserted into your matched +// document’s metadata with the index into the pattern found in `patterns` that +// matched. +func (s *_grokProcessor) TraceMatch(tracematch bool) *_grokProcessor { + + s.v.TraceMatch = &tracematch + + return s +} + +func (s *_grokProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Grok = s.v + + return container +} + +func (s *_grokProcessor) GrokProcessorCaster() *types.GrokProcessor { + return s.v +} diff --git a/typedapi/esdsl/groupings.go b/typedapi/esdsl/groupings.go new file mode 100644 index 0000000000..f5faee4d77 --- /dev/null +++ b/typedapi/esdsl/groupings.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _groupings struct { + v *types.Groupings +} + +func NewGroupings() *_groupings { + + return &_groupings{v: types.NewGroupings()} + +} + +// A date histogram group aggregates a date field into time-based buckets. +// This group is mandatory; you currently cannot roll up documents without a +// timestamp and a `date_histogram` group. +func (s *_groupings) DateHistogram(datehistogram types.DateHistogramGroupingVariant) *_groupings { + + s.v.DateHistogram = datehistogram.DateHistogramGroupingCaster() + + return s +} + +// The histogram group aggregates one or more numeric fields into numeric +// histogram intervals. +func (s *_groupings) Histogram(histogram types.HistogramGroupingVariant) *_groupings { + + s.v.Histogram = histogram.HistogramGroupingCaster() + + return s +} + +// The terms group can be used on keyword or numeric fields to allow bucketing +// via the terms aggregation at a later point. +// The indexer enumerates and stores all values of a field for each time-period. +// This can be potentially costly for high-cardinality groups such as IP +// addresses, especially if the time-bucket is particularly sparse. +func (s *_groupings) Terms(terms types.TermsGroupingVariant) *_groupings { + + s.v.Terms = terms.TermsGroupingCaster() + + return s +} + +func (s *_groupings) GroupingsCaster() *types.Groupings { + return s.v +} diff --git a/typedapi/esdsl/gsubprocessor.go b/typedapi/esdsl/gsubprocessor.go new file mode 100644 index 0000000000..db5755fa54 --- /dev/null +++ b/typedapi/esdsl/gsubprocessor.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _gsubProcessor struct { + v *types.GsubProcessor +} + +// Converts a string field by applying a regular expression and a replacement. +// If the field is an array of string, all members of the array will be +// converted. +// If any non-string values are encountered, the processor will throw an +// exception. +func NewGsubProcessor(pattern string, replacement string) *_gsubProcessor { + + tmp := &_gsubProcessor{v: types.NewGsubProcessor()} + + tmp.Pattern(pattern) + + tmp.Replacement(replacement) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_gsubProcessor) Description(description string) *_gsubProcessor { + + s.v.Description = &description + + return s +} + +// The field to apply the replacement to. +func (s *_gsubProcessor) Field(field string) *_gsubProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_gsubProcessor) If(if_ types.ScriptVariant) *_gsubProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_gsubProcessor) IgnoreFailure(ignorefailure bool) *_gsubProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_gsubProcessor) IgnoreMissing(ignoremissing bool) *_gsubProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_gsubProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_gsubProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// The pattern to be replaced. +func (s *_gsubProcessor) Pattern(pattern string) *_gsubProcessor { + + s.v.Pattern = pattern + + return s +} + +// The string to replace the matching patterns with. +func (s *_gsubProcessor) Replacement(replacement string) *_gsubProcessor { + + s.v.Replacement = replacement + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_gsubProcessor) Tag(tag string) *_gsubProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to +// By default, the `field` is updated in-place. +func (s *_gsubProcessor) TargetField(field string) *_gsubProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_gsubProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Gsub = s.v + + return container +} + +func (s *_gsubProcessor) GsubProcessorCaster() *types.GsubProcessor { + return s.v +} diff --git a/typedapi/esdsl/halffloatnumberproperty.go b/typedapi/esdsl/halffloatnumberproperty.go new file mode 100644 index 0000000000..eb08199149 --- /dev/null +++ b/typedapi/esdsl/halffloatnumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _halfFloatNumberProperty struct { + v *types.HalfFloatNumberProperty +} + +func NewHalfFloatNumberProperty() *_halfFloatNumberProperty { + + return &_halfFloatNumberProperty{v: types.NewHalfFloatNumberProperty()} + +} + +func (s *_halfFloatNumberProperty) Boost(boost types.Float64) *_halfFloatNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_halfFloatNumberProperty) Coerce(coerce bool) *_halfFloatNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_halfFloatNumberProperty) CopyTo(fields ...string) *_halfFloatNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_halfFloatNumberProperty) DocValues(docvalues bool) 
*_halfFloatNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_halfFloatNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_halfFloatNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_halfFloatNumberProperty) Fields(fields map[string]types.Property) *_halfFloatNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_halfFloatNumberProperty) AddField(key string, value types.PropertyVariant) *_halfFloatNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_halfFloatNumberProperty) IgnoreAbove(ignoreabove int) *_halfFloatNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_halfFloatNumberProperty) IgnoreMalformed(ignoremalformed bool) *_halfFloatNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_halfFloatNumberProperty) Index(index bool) *_halfFloatNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_halfFloatNumberProperty) Meta(meta map[string]string) *_halfFloatNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_halfFloatNumberProperty) AddMeta(key string, value string) *_halfFloatNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_halfFloatNumberProperty) NullValue(nullvalue float32) *_halfFloatNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_halfFloatNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_halfFloatNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_halfFloatNumberProperty) Properties(properties map[string]types.Property) *_halfFloatNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_halfFloatNumberProperty) AddProperty(key string, value types.PropertyVariant) *_halfFloatNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_halfFloatNumberProperty) Script(script types.ScriptVariant) *_halfFloatNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_halfFloatNumberProperty) Store(store bool) *_halfFloatNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_halfFloatNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_halfFloatNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. 
+func (s *_halfFloatNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_halfFloatNumberProperty { + + s.v.TimeSeriesDimension = &timeseriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_halfFloatNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_halfFloatNumberProperty { + + s.v.TimeSeriesMetric = &timeseriesmetric + return s +} + +func (s *_halfFloatNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_halfFloatNumberProperty) HalfFloatNumberPropertyCaster() *types.HalfFloatNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/haschildquery.go b/typedapi/esdsl/haschildquery.go new file mode 100644 index 0000000000..29237249a0 --- /dev/null +++ b/typedapi/esdsl/haschildquery.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode" +) + +type _hasChildQuery struct { + v *types.HasChildQuery +} + +// Returns parent documents whose joined child documents match a provided query. +func NewHasChildQuery(query types.QueryVariant) *_hasChildQuery { + + tmp := &_hasChildQuery{v: types.NewHasChildQuery()} + + tmp.Query(query) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_hasChildQuery) Boost(boost float32) *_hasChildQuery { + + s.v.Boost = &boost + + return s +} + +// Indicates whether to ignore an unmapped `type` and not return any documents +// instead of an error. +func (s *_hasChildQuery) IgnoreUnmapped(ignoreunmapped bool) *_hasChildQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +// If defined, each search hit will contain inner hits. +func (s *_hasChildQuery) InnerHits(innerhits types.InnerHitsVariant) *_hasChildQuery { + + s.v.InnerHits = innerhits.InnerHitsCaster() + + return s +} + +// Maximum number of child documents that match the query allowed for a returned +// parent document. +// If the parent document exceeds this limit, it is excluded from the search +// results. +func (s *_hasChildQuery) MaxChildren(maxchildren int) *_hasChildQuery { + + s.v.MaxChildren = &maxchildren + + return s +} + +// Minimum number of child documents that match the query required to match the +// query for a returned parent document. +// If the parent document does not meet this limit, it is excluded from the +// search results. 
+func (s *_hasChildQuery) MinChildren(minchildren int) *_hasChildQuery { + + s.v.MinChildren = &minchildren + + return s +} + +// Query you wish to run on child documents of the `type` field. +// If a child document matches the search, the query returns the parent +// document. +func (s *_hasChildQuery) Query(query types.QueryVariant) *_hasChildQuery { + + s.v.Query = *query.QueryCaster() + + return s +} + +func (s *_hasChildQuery) QueryName_(queryname_ string) *_hasChildQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates how scores for matching child documents affect the root parent +// document’s relevance score. +func (s *_hasChildQuery) ScoreMode(scoremode childscoremode.ChildScoreMode) *_hasChildQuery { + + s.v.ScoreMode = &scoremode + return s +} + +// Name of the child relationship mapped for the `join` field. +func (s *_hasChildQuery) Type(relationname string) *_hasChildQuery { + + s.v.Type = relationname + + return s +} + +func (s *_hasChildQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.HasChild = s.v + + return container +} + +func (s *_hasChildQuery) HasChildQueryCaster() *types.HasChildQuery { + return s.v +} diff --git a/typedapi/esdsl/hasparentquery.go b/typedapi/esdsl/hasparentquery.go new file mode 100644 index 0000000000..21286a3525 --- /dev/null +++ b/typedapi/esdsl/hasparentquery.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hasParentQuery struct { + v *types.HasParentQuery +} + +// Returns child documents whose joined parent document matches a provided +// query. +func NewHasParentQuery(query types.QueryVariant) *_hasParentQuery { + + tmp := &_hasParentQuery{v: types.NewHasParentQuery()} + + tmp.Query(query) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_hasParentQuery) Boost(boost float32) *_hasParentQuery { + + s.v.Boost = &boost + + return s +} + +// Indicates whether to ignore an unmapped `parent_type` and not return any +// documents instead of an error. +// You can use this parameter to query multiple indices that may not contain the +// `parent_type`. +func (s *_hasParentQuery) IgnoreUnmapped(ignoreunmapped bool) *_hasParentQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +// If defined, each search hit will contain inner hits. 
+func (s *_hasParentQuery) InnerHits(innerhits types.InnerHitsVariant) *_hasParentQuery { + + s.v.InnerHits = innerhits.InnerHitsCaster() + + return s +} + +// Name of the parent relationship mapped for the `join` field. +func (s *_hasParentQuery) ParentType(relationname string) *_hasParentQuery { + + s.v.ParentType = relationname + + return s +} + +// Query you wish to run on parent documents of the `parent_type` field. +// If a parent document matches the search, the query returns its child +// documents. +func (s *_hasParentQuery) Query(query types.QueryVariant) *_hasParentQuery { + + s.v.Query = *query.QueryCaster() + + return s +} + +func (s *_hasParentQuery) QueryName_(queryname_ string) *_hasParentQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates whether the relevance score of a matching parent document is +// aggregated into its child documents. +func (s *_hasParentQuery) Score(score bool) *_hasParentQuery { + + s.v.Score = &score + + return s +} + +func (s *_hasParentQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.HasParent = s.v + + return container +} + +func (s *_hasParentQuery) HasParentQueryCaster() *types.HasParentQuery { + return s.v +} diff --git a/typedapi/esdsl/hdrmethod.go b/typedapi/esdsl/hdrmethod.go new file mode 100644 index 0000000000..f4e028627a --- /dev/null +++ b/typedapi/esdsl/hdrmethod.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hdrMethod struct { + v *types.HdrMethod +} + +func NewHdrMethod() *_hdrMethod { + + return &_hdrMethod{v: types.NewHdrMethod()} + +} + +// Specifies the resolution of values for the histogram in number of significant +// digits. +func (s *_hdrMethod) NumberOfSignificantValueDigits(numberofsignificantvaluedigits int) *_hdrMethod { + + s.v.NumberOfSignificantValueDigits = &numberofsignificantvaluedigits + + return s +} + +func (s *_hdrMethod) HdrMethodCaster() *types.HdrMethod { + return s.v +} diff --git a/typedapi/esdsl/highlight.go b/typedapi/esdsl/highlight.go new file mode 100644 index 0000000000..9d74ce9386 --- /dev/null +++ b/typedapi/esdsl/highlight.go @@ -0,0 +1,287 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype" +) + +type _highlight struct { + v *types.Highlight +} + +func NewHighlight() *_highlight { + + return &_highlight{v: types.NewHighlight()} + +} + +// A string that contains each boundary character. +func (s *_highlight) BoundaryChars(boundarychars string) *_highlight { + + s.v.BoundaryChars = &boundarychars + + return s +} + +// How far to scan for boundary characters. +func (s *_highlight) BoundaryMaxScan(boundarymaxscan int) *_highlight { + + s.v.BoundaryMaxScan = &boundarymaxscan + + return s +} + +// Specifies how to break the highlighted fragments: chars, sentence, or word. +// Only valid for the unified and fvh highlighters. +// Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for +// the `fvh` highlighter. 
+func (s *_highlight) BoundaryScanner(boundaryscanner boundaryscanner.BoundaryScanner) *_highlight { + + s.v.BoundaryScanner = &boundaryscanner + return s +} + +// Controls which locale is used to search for sentence and word boundaries. +// This parameter takes a form of a language tag, for example: `"en-US"`, +// `"fr-FR"`, `"ja-JP"`. +func (s *_highlight) BoundaryScannerLocale(boundaryscannerlocale string) *_highlight { + + s.v.BoundaryScannerLocale = &boundaryscannerlocale + + return s +} + +func (s *_highlight) Encoder(encoder highlighterencoder.HighlighterEncoder) *_highlight { + + s.v.Encoder = &encoder + return s +} + +func (s *_highlight) Fields(fields map[string]types.HighlightField) *_highlight { + + s.v.Fields = fields + return s +} + +func (s *_highlight) AddField(key string, value types.HighlightFieldVariant) *_highlight { + + var tmp map[string]types.HighlightField + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.HighlightField) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.HighlightFieldCaster() + + s.v.Fields = tmp + return s +} + +func (s *_highlight) ForceSource(forcesource bool) *_highlight { + + s.v.ForceSource = &forcesource + + return s +} + +// The size of the highlighted fragment in characters. +func (s *_highlight) FragmentSize(fragmentsize int) *_highlight { + + s.v.FragmentSize = &fragmentsize + + return s +} + +// Specifies how text should be broken up in highlight snippets: `simple` or +// `span`. +// Only valid for the `plain` highlighter. +func (s *_highlight) Fragmenter(fragmenter highlighterfragmenter.HighlighterFragmenter) *_highlight { + + s.v.Fragmenter = &fragmenter + return s +} + +func (s *_highlight) HighlightFilter(highlightfilter bool) *_highlight { + + s.v.HighlightFilter = &highlightfilter + + return s +} + +// Highlight matches for a query other than the search query. +// This is especially useful if you use a rescore query because those are not +// taken into account by highlighting by default. 
+func (s *_highlight) HighlightQuery(highlightquery types.QueryVariant) *_highlight { + + s.v.HighlightQuery = highlightquery.QueryCaster() + + return s +} + +// If set to a non-negative value, highlighting stops at this defined maximum +// limit. +// The rest of the text is not processed, thus not highlighted and no error is +// returned +// The `max_analyzed_offset` query setting does not override the +// `index.highlight.max_analyzed_offset` setting, which prevails when it’s set +// to lower value than the query setting. +func (s *_highlight) MaxAnalyzedOffset(maxanalyzedoffset int) *_highlight { + + s.v.MaxAnalyzedOffset = &maxanalyzedoffset + + return s +} + +func (s *_highlight) MaxFragmentLength(maxfragmentlength int) *_highlight { + + s.v.MaxFragmentLength = &maxfragmentlength + + return s +} + +// The amount of text you want to return from the beginning of the field if +// there are no matching fragments to highlight. +func (s *_highlight) NoMatchSize(nomatchsize int) *_highlight { + + s.v.NoMatchSize = &nomatchsize + + return s +} + +// The maximum number of fragments to return. +// If the number of fragments is set to `0`, no fragments are returned. +// Instead, the entire field contents are highlighted and returned. +// This can be handy when you need to highlight short texts such as a title or +// address, but fragmentation is not required. +// If `number_of_fragments` is `0`, `fragment_size` is ignored. 
+func (s *_highlight) NumberOfFragments(numberoffragments int) *_highlight { + + s.v.NumberOfFragments = &numberoffragments + + return s +} + +func (s *_highlight) Options(options map[string]json.RawMessage) *_highlight { + + s.v.Options = options + return s +} + +func (s *_highlight) AddOption(key string, value json.RawMessage) *_highlight { + + var tmp map[string]json.RawMessage + if s.v.Options == nil { + s.v.Options = make(map[string]json.RawMessage) + } else { + tmp = s.v.Options + } + + tmp[key] = value + + s.v.Options = tmp + return s +} + +// Sorts highlighted fragments by score when set to `score`. +// By default, fragments will be output in the order they appear in the field +// (order: `none`). +// Setting this option to `score` will output the most relevant fragments first. +// Each highlighter applies its own logic to compute relevancy scores. +func (s *_highlight) Order(order highlighterorder.HighlighterOrder) *_highlight { + + s.v.Order = &order + return s +} + +// Controls the number of matching phrases in a document that are considered. +// Prevents the `fvh` highlighter from analyzing too many phrases and consuming +// too much memory. +// When using `matched_fields`, `phrase_limit` phrases per matched field are +// considered. Raising the limit increases query time and consumes more memory. +// Only supported by the `fvh` highlighter. +func (s *_highlight) PhraseLimit(phraselimit int) *_highlight { + + s.v.PhraseLimit = &phraselimit + + return s +} + +// Use in conjunction with `pre_tags` to define the HTML tags to use for the +// highlighted text. +// By default, highlighted text is wrapped in `<em>` and `</em>` tags. +func (s *_highlight) PostTags(posttags ...string) *_highlight { + + for _, v := range posttags { + + s.v.PostTags = append(s.v.PostTags, v) + + } + return s +} + +// Use in conjunction with `post_tags` to define the HTML tags to use for the +// highlighted text. +// By default, highlighted text is wrapped in `<em>` and `</em>` tags. 
+func (s *_highlight) PreTags(pretags ...string) *_highlight { + + for _, v := range pretags { + + s.v.PreTags = append(s.v.PreTags, v) + + } + return s +} + +// By default, only fields that contains a query match are highlighted. +// Set to `false` to highlight all fields. +func (s *_highlight) RequireFieldMatch(requirefieldmatch bool) *_highlight { + + s.v.RequireFieldMatch = &requirefieldmatch + + return s +} + +// Set to `styled` to use the built-in tag schema. +func (s *_highlight) TagsSchema(tagsschema highlightertagsschema.HighlighterTagsSchema) *_highlight { + + s.v.TagsSchema = &tagsschema + return s +} + +func (s *_highlight) Type(type_ highlightertype.HighlighterType) *_highlight { + + s.v.Type = &type_ + return s +} + +func (s *_highlight) HighlightCaster() *types.Highlight { + return s.v +} diff --git a/typedapi/esdsl/highlightfield.go b/typedapi/esdsl/highlightfield.go new file mode 100644 index 0000000000..6e8357b467 --- /dev/null +++ b/typedapi/esdsl/highlightfield.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype" +) + +type _highlightField struct { + v *types.HighlightField +} + +func NewHighlightField() *_highlightField { + + return &_highlightField{v: types.NewHighlightField()} + +} + +// A string that contains each boundary character. +func (s *_highlightField) BoundaryChars(boundarychars string) *_highlightField { + + s.v.BoundaryChars = &boundarychars + + return s +} + +// How far to scan for boundary characters. +func (s *_highlightField) BoundaryMaxScan(boundarymaxscan int) *_highlightField { + + s.v.BoundaryMaxScan = &boundarymaxscan + + return s +} + +// Specifies how to break the highlighted fragments: chars, sentence, or word. +// Only valid for the unified and fvh highlighters. +// Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for +// the `fvh` highlighter. +func (s *_highlightField) BoundaryScanner(boundaryscanner boundaryscanner.BoundaryScanner) *_highlightField { + + s.v.BoundaryScanner = &boundaryscanner + return s +} + +// Controls which locale is used to search for sentence and word boundaries. +// This parameter takes a form of a language tag, for example: `"en-US"`, +// `"fr-FR"`, `"ja-JP"`. 
+func (s *_highlightField) BoundaryScannerLocale(boundaryscannerlocale string) *_highlightField { + + s.v.BoundaryScannerLocale = &boundaryscannerlocale + + return s +} + +func (s *_highlightField) ForceSource(forcesource bool) *_highlightField { + + s.v.ForceSource = &forcesource + + return s +} + +func (s *_highlightField) FragmentOffset(fragmentoffset int) *_highlightField { + + s.v.FragmentOffset = &fragmentoffset + + return s +} + +// The size of the highlighted fragment in characters. +func (s *_highlightField) FragmentSize(fragmentsize int) *_highlightField { + + s.v.FragmentSize = &fragmentsize + + return s +} + +// Specifies how text should be broken up in highlight snippets: `simple` or +// `span`. +// Only valid for the `plain` highlighter. +func (s *_highlightField) Fragmenter(fragmenter highlighterfragmenter.HighlighterFragmenter) *_highlightField { + + s.v.Fragmenter = &fragmenter + return s +} + +func (s *_highlightField) HighlightFilter(highlightfilter bool) *_highlightField { + + s.v.HighlightFilter = &highlightfilter + + return s +} + +// Highlight matches for a query other than the search query. +// This is especially useful if you use a rescore query because those are not +// taken into account by highlighting by default. +func (s *_highlightField) HighlightQuery(highlightquery types.QueryVariant) *_highlightField { + + s.v.HighlightQuery = highlightquery.QueryCaster() + + return s +} + +func (s *_highlightField) MatchedFields(fields ...string) *_highlightField { + + s.v.MatchedFields = fields + + return s +} + +// If set to a non-negative value, highlighting stops at this defined maximum +// limit. +// The rest of the text is not processed, thus not highlighted and no error is +// returned +// The `max_analyzed_offset` query setting does not override the +// `index.highlight.max_analyzed_offset` setting, which prevails when it’s set +// to lower value than the query setting. 
+func (s *_highlightField) MaxAnalyzedOffset(maxanalyzedoffset int) *_highlightField { + + s.v.MaxAnalyzedOffset = &maxanalyzedoffset + + return s +} + +func (s *_highlightField) MaxFragmentLength(maxfragmentlength int) *_highlightField { + + s.v.MaxFragmentLength = &maxfragmentlength + + return s +} + +// The amount of text you want to return from the beginning of the field if +// there are no matching fragments to highlight. +func (s *_highlightField) NoMatchSize(nomatchsize int) *_highlightField { + + s.v.NoMatchSize = &nomatchsize + + return s +} + +// The maximum number of fragments to return. +// If the number of fragments is set to `0`, no fragments are returned. +// Instead, the entire field contents are highlighted and returned. +// This can be handy when you need to highlight short texts such as a title or +// address, but fragmentation is not required. +// If `number_of_fragments` is `0`, `fragment_size` is ignored. +func (s *_highlightField) NumberOfFragments(numberoffragments int) *_highlightField { + + s.v.NumberOfFragments = &numberoffragments + + return s +} + +func (s *_highlightField) Options(options map[string]json.RawMessage) *_highlightField { + + s.v.Options = options + return s +} + +func (s *_highlightField) AddOption(key string, value json.RawMessage) *_highlightField { + + var tmp map[string]json.RawMessage + if s.v.Options == nil { + s.v.Options = make(map[string]json.RawMessage) + } else { + tmp = s.v.Options + } + + tmp[key] = value + + s.v.Options = tmp + return s +} + +// Sorts highlighted fragments by score when set to `score`. +// By default, fragments will be output in the order they appear in the field +// (order: `none`). +// Setting this option to `score` will output the most relevant fragments first. +// Each highlighter applies its own logic to compute relevancy scores. 
+func (s *_highlightField) Order(order highlighterorder.HighlighterOrder) *_highlightField { + + s.v.Order = &order + return s +} + +// Controls the number of matching phrases in a document that are considered. +// Prevents the `fvh` highlighter from analyzing too many phrases and consuming +// too much memory. +// When using `matched_fields`, `phrase_limit` phrases per matched field are +// considered. Raising the limit increases query time and consumes more memory. +// Only supported by the `fvh` highlighter. +func (s *_highlightField) PhraseLimit(phraselimit int) *_highlightField { + + s.v.PhraseLimit = &phraselimit + + return s +} + +// Use in conjunction with `pre_tags` to define the HTML tags to use for the +// highlighted text. +// By default, highlighted text is wrapped in `<em>` and `</em>` tags. +func (s *_highlightField) PostTags(posttags ...string) *_highlightField { + + for _, v := range posttags { + + s.v.PostTags = append(s.v.PostTags, v) + + } + return s +} + +// Use in conjunction with `post_tags` to define the HTML tags to use for the +// highlighted text. +// By default, highlighted text is wrapped in `<em>` and `</em>` tags. +func (s *_highlightField) PreTags(pretags ...string) *_highlightField { + + for _, v := range pretags { + + s.v.PreTags = append(s.v.PreTags, v) + + } + return s +} + +// By default, only fields that contains a query match are highlighted. +// Set to `false` to highlight all fields. +func (s *_highlightField) RequireFieldMatch(requirefieldmatch bool) *_highlightField { + + s.v.RequireFieldMatch = &requirefieldmatch + + return s +} + +// Set to `styled` to use the built-in tag schema. 
+func (s *_highlightField) TagsSchema(tagsschema highlightertagsschema.HighlighterTagsSchema) *_highlightField { + + s.v.TagsSchema = &tagsschema + return s +} + +func (s *_highlightField) Type(type_ highlightertype.HighlighterType) *_highlightField { + + s.v.Type = &type_ + return s +} + +func (s *_highlightField) HighlightFieldCaster() *types.HighlightField { + return s.v +} diff --git a/typedapi/esdsl/hindianalyzer.go b/typedapi/esdsl/hindianalyzer.go new file mode 100644 index 0000000000..e7be15f7a8 --- /dev/null +++ b/typedapi/esdsl/hindianalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hindiAnalyzer struct { + v *types.HindiAnalyzer +} + +func NewHindiAnalyzer() *_hindiAnalyzer { + + return &_hindiAnalyzer{v: types.NewHindiAnalyzer()} + +} + +func (s *_hindiAnalyzer) StemExclusion(stemexclusions ...string) *_hindiAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_hindiAnalyzer) Stopwords(stopwords ...string) *_hindiAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_hindiAnalyzer) StopwordsPath(stopwordspath string) *_hindiAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_hindiAnalyzer) HindiAnalyzerCaster() *types.HindiAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/hint.go b/typedapi/esdsl/hint.go new file mode 100644 index 0000000000..5659c7a4f3 --- /dev/null +++ b/typedapi/esdsl/hint.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hint struct { + v *types.Hint +} + +func NewHint() *_hint { + + return &_hint{v: types.NewHint()} + +} + +// A single key-value pair to match against the labels section +// of a profile. A profile is considered matching if it matches +// at least one of the strings. +func (s *_hint) Labels(labels map[string][]string) *_hint { + + s.v.Labels = labels + return s +} + +// A list of profile UIDs to match against. +func (s *_hint) Uids(uids ...string) *_hint { + + for _, v := range uids { + + s.v.Uids = append(s.v.Uids, v) + + } + return s +} + +func (s *_hint) HintCaster() *types.Hint { + return s.v +} diff --git a/typedapi/esdsl/histogramaggregation.go b/typedapi/esdsl/histogramaggregation.go new file mode 100644 index 0000000000..152283acfd --- /dev/null +++ b/typedapi/esdsl/histogramaggregation.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _histogramAggregation struct { + v *types.HistogramAggregation +} + +// A multi-bucket values source based aggregation that can be applied on numeric +// values or numeric range values extracted from the documents. +// It dynamically builds fixed size (interval) buckets over the values. +func NewHistogramAggregation() *_histogramAggregation { + + return &_histogramAggregation{v: types.NewHistogramAggregation()} + +} + +// Enables extending the bounds of the histogram beyond the data itself. +func (s *_histogramAggregation) ExtendedBounds(extendedbounds types.ExtendedBoundsdoubleVariant) *_histogramAggregation { + + s.v.ExtendedBounds = extendedbounds.ExtendedBoundsdoubleCaster() + + return s +} + +// The name of the field to aggregate on. +func (s *_histogramAggregation) Field(field string) *_histogramAggregation { + + s.v.Field = &field + + return s +} + +func (s *_histogramAggregation) Format(format string) *_histogramAggregation { + + s.v.Format = &format + + return s +} + +// Limits the range of buckets in the histogram. +// It is particularly useful in the case of open data ranges that can result in +// a very large number of buckets. +func (s *_histogramAggregation) HardBounds(hardbounds types.ExtendedBoundsdoubleVariant) *_histogramAggregation { + + s.v.HardBounds = hardbounds.ExtendedBoundsdoubleCaster() + + return s +} + +// The interval for the buckets. +// Must be a positive decimal. +func (s *_histogramAggregation) Interval(interval types.Float64) *_histogramAggregation { + + s.v.Interval = &interval + + return s +} + +// If `true`, returns buckets as a hash instead of an array, keyed by the bucket +// keys. 
+func (s *_histogramAggregation) Keyed(keyed bool) *_histogramAggregation { + + s.v.Keyed = &keyed + + return s +} + +// Only returns buckets that have `min_doc_count` number of documents. +// By default, the response will fill gaps in the histogram with empty buckets. +func (s *_histogramAggregation) MinDocCount(mindoccount int) *_histogramAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_histogramAggregation) Missing(missing types.Float64) *_histogramAggregation { + + s.v.Missing = &missing + + return s +} + +// By default, the bucket keys start with 0 and then continue in even spaced +// steps of `interval`. +// The bucket boundaries can be shifted by using the `offset` option. +func (s *_histogramAggregation) Offset(offset types.Float64) *_histogramAggregation { + + s.v.Offset = &offset + + return s +} + +// The sort order of the returned buckets. +// By default, the returned buckets are sorted by their key ascending. 
+func (s *_histogramAggregation) Order(aggregateorder types.AggregateOrderVariant) *_histogramAggregation { + + s.v.Order = *aggregateorder.AggregateOrderCaster() + + return s +} + +func (s *_histogramAggregation) Script(script types.ScriptVariant) *_histogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_histogramAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Histogram = s.v + + return container +} + +func (s *_histogramAggregation) PivotGroupByContainerCaster() *types.PivotGroupByContainer { + container := types.NewPivotGroupByContainer() + + container.Histogram = s.v + + return container +} + +func (s *_histogramAggregation) HistogramAggregationCaster() *types.HistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/histogramgrouping.go b/typedapi/esdsl/histogramgrouping.go new file mode 100644 index 0000000000..50b3665e2d --- /dev/null +++ b/typedapi/esdsl/histogramgrouping.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _histogramGrouping struct { + v *types.HistogramGrouping +} + +func NewHistogramGrouping(interval int64) *_histogramGrouping { + + tmp := &_histogramGrouping{v: types.NewHistogramGrouping()} + + tmp.Interval(interval) + + return tmp + +} + +// The set of fields that you wish to build histograms for. +// All fields specified must be some kind of numeric. +// Order does not matter. +func (s *_histogramGrouping) Fields(fields ...string) *_histogramGrouping { + + s.v.Fields = fields + + return s +} + +// The interval of histogram buckets to be generated when rolling up. +// For example, a value of `5` creates buckets that are five units wide (`0-5`, +// `5-10`, etc). +// Note that only one interval can be specified in the histogram group, meaning +// that all fields being grouped via the histogram must share the same interval. +func (s *_histogramGrouping) Interval(interval int64) *_histogramGrouping { + + s.v.Interval = interval + + return s +} + +func (s *_histogramGrouping) HistogramGroupingCaster() *types.HistogramGrouping { + return s.v +} diff --git a/typedapi/esdsl/histogramproperty.go b/typedapi/esdsl/histogramproperty.go new file mode 100644 index 0000000000..06157ac3e2 --- /dev/null +++ b/typedapi/esdsl/histogramproperty.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _histogramProperty struct { + v *types.HistogramProperty +} + +func NewHistogramProperty() *_histogramProperty { + + return &_histogramProperty{v: types.NewHistogramProperty()} + +} + +func (s *_histogramProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_histogramProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_histogramProperty) Fields(fields map[string]types.Property) *_histogramProperty { + + s.v.Fields = fields + return s +} + +func (s *_histogramProperty) AddField(key string, value types.PropertyVariant) *_histogramProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_histogramProperty) IgnoreAbove(ignoreabove int) *_histogramProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_histogramProperty) IgnoreMalformed(ignoremalformed bool) *_histogramProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +// Metadata about the field. 
+func (s *_histogramProperty) Meta(meta map[string]string) *_histogramProperty { + + s.v.Meta = meta + return s +} + +func (s *_histogramProperty) AddMeta(key string, value string) *_histogramProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_histogramProperty) Properties(properties map[string]types.Property) *_histogramProperty { + + s.v.Properties = properties + return s +} + +func (s *_histogramProperty) AddProperty(key string, value types.PropertyVariant) *_histogramProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_histogramProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_histogramProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_histogramProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_histogramProperty) HistogramPropertyCaster() *types.HistogramProperty { + return s.v +} diff --git a/typedapi/esdsl/holtlinearmodelsettings.go b/typedapi/esdsl/holtlinearmodelsettings.go new file mode 100644 index 0000000000..07acf02d7b --- /dev/null +++ b/typedapi/esdsl/holtlinearmodelsettings.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _holtLinearModelSettings struct { + v *types.HoltLinearModelSettings +} + +func NewHoltLinearModelSettings() *_holtLinearModelSettings { + + return &_holtLinearModelSettings{v: types.NewHoltLinearModelSettings()} + +} + +func (s *_holtLinearModelSettings) Alpha(alpha float32) *_holtLinearModelSettings { + + s.v.Alpha = &alpha + + return s +} + +func (s *_holtLinearModelSettings) Beta(beta float32) *_holtLinearModelSettings { + + s.v.Beta = &beta + + return s +} + +func (s *_holtLinearModelSettings) HoltLinearModelSettingsCaster() *types.HoltLinearModelSettings { + return s.v +} diff --git a/typedapi/esdsl/holtmovingaverageaggregation.go b/typedapi/esdsl/holtmovingaverageaggregation.go new file mode 100644 index 0000000000..da93b95960 --- /dev/null +++ b/typedapi/esdsl/holtmovingaverageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _holtMovingAverageAggregation struct { + v *types.HoltMovingAverageAggregation +} + +func NewHoltMovingAverageAggregation(settings types.HoltLinearModelSettingsVariant) *_holtMovingAverageAggregation { + + tmp := &_holtMovingAverageAggregation{v: types.NewHoltMovingAverageAggregation()} + + tmp.Settings(settings) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_holtMovingAverageAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_holtMovingAverageAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_holtMovingAverageAggregation) Format(format string) *_holtMovingAverageAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_holtMovingAverageAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_holtMovingAverageAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_holtMovingAverageAggregation) Minimize(minimize bool) *_holtMovingAverageAggregation { + + s.v.Minimize = &minimize + + return s +} + +func (s *_holtMovingAverageAggregation) Predict(predict int) *_holtMovingAverageAggregation { + + s.v.Predict = &predict + + return s +} + +func (s *_holtMovingAverageAggregation) Settings(settings types.HoltLinearModelSettingsVariant) *_holtMovingAverageAggregation { + + s.v.Settings = *settings.HoltLinearModelSettingsCaster() + + return s +} + +func (s *_holtMovingAverageAggregation) Window(window int) *_holtMovingAverageAggregation { + + s.v.Window = &window + + return s +} + +func (s *_holtMovingAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingAvg = s.v + + return container +} + +func (s *_holtMovingAverageAggregation) HoltMovingAverageAggregationCaster() *types.HoltMovingAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/holtwintersmodelsettings.go b/typedapi/esdsl/holtwintersmodelsettings.go new file mode 100644 index 0000000000..3c80770076 --- /dev/null +++ b/typedapi/esdsl/holtwintersmodelsettings.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype" +) + +type _holtWintersModelSettings struct { + v *types.HoltWintersModelSettings +} + +func NewHoltWintersModelSettings() *_holtWintersModelSettings { + + return &_holtWintersModelSettings{v: types.NewHoltWintersModelSettings()} + +} + +func (s *_holtWintersModelSettings) Alpha(alpha float32) *_holtWintersModelSettings { + + s.v.Alpha = &alpha + + return s +} + +func (s *_holtWintersModelSettings) Beta(beta float32) *_holtWintersModelSettings { + + s.v.Beta = &beta + + return s +} + +func (s *_holtWintersModelSettings) Gamma(gamma float32) *_holtWintersModelSettings { + + s.v.Gamma = &gamma + + return s +} + +func (s *_holtWintersModelSettings) Pad(pad bool) *_holtWintersModelSettings { + + s.v.Pad = &pad + + return s +} + +func (s *_holtWintersModelSettings) Period(period int) *_holtWintersModelSettings { + + s.v.Period = &period + + return s +} + +func (s *_holtWintersModelSettings) Type(type_ holtwinterstype.HoltWintersType) *_holtWintersModelSettings { + + s.v.Type = &type_ + return s +} + +func (s *_holtWintersModelSettings) HoltWintersModelSettingsCaster() *types.HoltWintersModelSettings { + return s.v +} diff --git a/typedapi/esdsl/holtwintersmovingaverageaggregation.go 
b/typedapi/esdsl/holtwintersmovingaverageaggregation.go new file mode 100644 index 0000000000..f6cd0e405d --- /dev/null +++ b/typedapi/esdsl/holtwintersmovingaverageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _holtWintersMovingAverageAggregation struct { + v *types.HoltWintersMovingAverageAggregation +} + +func NewHoltWintersMovingAverageAggregation(settings types.HoltWintersModelSettingsVariant) *_holtWintersMovingAverageAggregation { + + tmp := &_holtWintersMovingAverageAggregation{v: types.NewHoltWintersMovingAverageAggregation()} + + tmp.Settings(settings) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. 
+func (s *_holtWintersMovingAverageAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_holtWintersMovingAverageAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_holtWintersMovingAverageAggregation) Format(format string) *_holtWintersMovingAverageAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_holtWintersMovingAverageAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_holtWintersMovingAverageAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_holtWintersMovingAverageAggregation) Minimize(minimize bool) *_holtWintersMovingAverageAggregation { + + s.v.Minimize = &minimize + + return s +} + +func (s *_holtWintersMovingAverageAggregation) Predict(predict int) *_holtWintersMovingAverageAggregation { + + s.v.Predict = &predict + + return s +} + +func (s *_holtWintersMovingAverageAggregation) Settings(settings types.HoltWintersModelSettingsVariant) *_holtWintersMovingAverageAggregation { + + s.v.Settings = *settings.HoltWintersModelSettingsCaster() + + return s +} + +func (s *_holtWintersMovingAverageAggregation) Window(window int) *_holtWintersMovingAverageAggregation { + + s.v.Window = &window + + return s +} + +func (s *_holtWintersMovingAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingAvg = s.v + + return container +} + +func (s *_holtWintersMovingAverageAggregation) HoltWintersMovingAverageAggregationCaster() *types.HoltWintersMovingAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/hop.go b/typedapi/esdsl/hop.go new file mode 100644 index 0000000000..60fbc93ec9 --- /dev/null +++ b/typedapi/esdsl/hop.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hop struct { + v *types.Hop +} + +func NewHop() *_hop { + + return &_hop{v: types.NewHop()} + +} + +// Specifies one or more fields from which you want to extract terms that are +// associated with the specified vertices. +func (s *_hop) Connections(connections types.HopVariant) *_hop { + + s.v.Connections = connections.HopCaster() + + return s +} + +// An optional guiding query that constrains the Graph API as it explores +// connected terms. +func (s *_hop) Query(query types.QueryVariant) *_hop { + + s.v.Query = query.QueryCaster() + + return s +} + +// Contains the fields you are interested in. 
+func (s *_hop) Vertices(vertices ...types.VertexDefinitionVariant) *_hop { + + for _, v := range vertices { + + s.v.Vertices = append(s.v.Vertices, *v.VertexDefinitionCaster()) + + } + return s +} + +func (s *_hop) HopCaster() *types.Hop { + return s.v +} diff --git a/typedapi/esdsl/hourandminute.go b/typedapi/esdsl/hourandminute.go new file mode 100644 index 0000000000..97535b56a1 --- /dev/null +++ b/typedapi/esdsl/hourandminute.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hourAndMinute struct { + v *types.HourAndMinute +} + +func NewHourAndMinute() *_hourAndMinute { + + return &_hourAndMinute{v: types.NewHourAndMinute()} + +} + +func (s *_hourAndMinute) Hour(hours ...int) *_hourAndMinute { + + for _, v := range hours { + + s.v.Hour = append(s.v.Hour, v) + + } + return s +} + +func (s *_hourAndMinute) Minute(minutes ...int) *_hourAndMinute { + + for _, v := range minutes { + + s.v.Minute = append(s.v.Minute, v) + + } + return s +} + +func (s *_hourAndMinute) HourAndMinuteCaster() *types.HourAndMinute { + return s.v +} diff --git a/typedapi/esdsl/hourlyschedule.go b/typedapi/esdsl/hourlyschedule.go new file mode 100644 index 0000000000..09e6046931 --- /dev/null +++ b/typedapi/esdsl/hourlyschedule.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hourlySchedule struct { + v *types.HourlySchedule +} + +func NewHourlySchedule() *_hourlySchedule { + + return &_hourlySchedule{v: types.NewHourlySchedule()} + +} + +func (s *_hourlySchedule) Minute(minutes ...int) *_hourlySchedule { + + for _, v := range minutes { + + s.v.Minute = append(s.v.Minute, v) + + } + return s +} + +func (s *_hourlySchedule) ScheduleContainerCaster() *types.ScheduleContainer { + container := types.NewScheduleContainer() + + container.Hourly = s.v + + return container +} + +func (s *_hourlySchedule) HourlyScheduleCaster() *types.HourlySchedule { + return s.v +} diff --git a/typedapi/esdsl/htmlstripcharfilter.go b/typedapi/esdsl/htmlstripcharfilter.go new file mode 100644 index 0000000000..5b12609ca7 --- /dev/null +++ b/typedapi/esdsl/htmlstripcharfilter.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _htmlStripCharFilter struct { + v *types.HtmlStripCharFilter +} + +func NewHtmlStripCharFilter() *_htmlStripCharFilter { + + return &_htmlStripCharFilter{v: types.NewHtmlStripCharFilter()} + +} + +func (s *_htmlStripCharFilter) EscapedTags(escapedtags ...string) *_htmlStripCharFilter { + + for _, v := range escapedtags { + + s.v.EscapedTags = append(s.v.EscapedTags, v) + + } + return s +} + +func (s *_htmlStripCharFilter) Version(versionstring string) *_htmlStripCharFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_htmlStripCharFilter) HtmlStripCharFilterCaster() *types.HtmlStripCharFilter { + return s.v +} diff --git a/typedapi/esdsl/htmlstripprocessor.go b/typedapi/esdsl/htmlstripprocessor.go new file mode 100644 index 0000000000..39c747ef81 --- /dev/null +++ b/typedapi/esdsl/htmlstripprocessor.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _htmlStripProcessor struct { + v *types.HtmlStripProcessor +} + +// Removes HTML tags from the field. +// If the field is an array of strings, HTML tags will be removed from all +// members of the array. +func NewHtmlStripProcessor() *_htmlStripProcessor { + + return &_htmlStripProcessor{v: types.NewHtmlStripProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_htmlStripProcessor) Description(description string) *_htmlStripProcessor { + + s.v.Description = &description + + return s +} + +// The string-valued field to remove HTML tags from. +func (s *_htmlStripProcessor) Field(field string) *_htmlStripProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_htmlStripProcessor) If(if_ types.ScriptVariant) *_htmlStripProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_htmlStripProcessor) IgnoreFailure(ignorefailure bool) *_htmlStripProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document, +func (s *_htmlStripProcessor) IgnoreMissing(ignoremissing bool) *_htmlStripProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_htmlStripProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_htmlStripProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_htmlStripProcessor) Tag(tag string) *_htmlStripProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to +// By default, the `field` is updated in-place. +func (s *_htmlStripProcessor) TargetField(field string) *_htmlStripProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_htmlStripProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.HtmlStrip = s.v + + return container +} + +func (s *_htmlStripProcessor) HtmlStripProcessorCaster() *types.HtmlStripProcessor { + return s.v +} diff --git a/typedapi/esdsl/httpemailattachment.go b/typedapi/esdsl/httpemailattachment.go new file mode 100644 index 0000000000..4eb59ec6d2 --- /dev/null +++ b/typedapi/esdsl/httpemailattachment.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _httpEmailAttachment struct { + v *types.HttpEmailAttachment +} + +func NewHttpEmailAttachment() *_httpEmailAttachment { + + return &_httpEmailAttachment{v: types.NewHttpEmailAttachment()} + +} + +func (s *_httpEmailAttachment) ContentType(contenttype string) *_httpEmailAttachment { + + s.v.ContentType = &contenttype + + return s +} + +func (s *_httpEmailAttachment) Inline(inline bool) *_httpEmailAttachment { + + s.v.Inline = &inline + + return s +} + +func (s *_httpEmailAttachment) Request(request types.HttpInputRequestDefinitionVariant) *_httpEmailAttachment { + + s.v.Request = request.HttpInputRequestDefinitionCaster() + + return s +} + +func (s *_httpEmailAttachment) EmailAttachmentContainerCaster() *types.EmailAttachmentContainer { + container := types.NewEmailAttachmentContainer() + + container.Http = s.v + + return container +} + +func (s *_httpEmailAttachment) HttpEmailAttachmentCaster() *types.HttpEmailAttachment { + return s.v +} diff --git a/typedapi/esdsl/httpheaders.go b/typedapi/esdsl/httpheaders.go new file mode 100644 index 0000000000..30a8b260e6 --- /dev/null +++ b/typedapi/esdsl/httpheaders.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _httpHeaders struct { + v types.HttpHeaders +} + +func NewHttpHeaders(httpheaders map[string][]string) *_httpHeaders { + return &_httpHeaders{v: make(map[string][]string, 0)} +} + +func (u *_httpHeaders) HttpHeadersCaster() *types.HttpHeaders { + return &u.v +} diff --git a/typedapi/esdsl/httpinput.go b/typedapi/esdsl/httpinput.go new file mode 100644 index 0000000000..faa0ab7a9e --- /dev/null +++ b/typedapi/esdsl/httpinput.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/responsecontenttype" +) + +type _httpInput struct { + v *types.HttpInput +} + +func NewHttpInput() *_httpInput { + + return &_httpInput{v: types.NewHttpInput()} + +} + +func (s *_httpInput) Extract(extracts ...string) *_httpInput { + + for _, v := range extracts { + + s.v.Extract = append(s.v.Extract, v) + + } + return s +} + +func (s *_httpInput) Request(request types.HttpInputRequestDefinitionVariant) *_httpInput { + + s.v.Request = request.HttpInputRequestDefinitionCaster() + + return s +} + +func (s *_httpInput) ResponseContentType(responsecontenttype responsecontenttype.ResponseContentType) *_httpInput { + + s.v.ResponseContentType = &responsecontenttype + return s +} + +func (s *_httpInput) WatcherInputCaster() *types.WatcherInput { + container := types.NewWatcherInput() + + container.Http = s.v + + return container +} + +func (s *_httpInput) HttpInputCaster() *types.HttpInput { + return s.v +} diff --git a/typedapi/esdsl/httpinputauthentication.go b/typedapi/esdsl/httpinputauthentication.go new file mode 100644 index 0000000000..d814b4fa6f --- /dev/null +++ b/typedapi/esdsl/httpinputauthentication.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _httpInputAuthentication struct { + v *types.HttpInputAuthentication +} + +func NewHttpInputAuthentication(basic types.HttpInputBasicAuthenticationVariant) *_httpInputAuthentication { + + tmp := &_httpInputAuthentication{v: types.NewHttpInputAuthentication()} + + tmp.Basic(basic) + + return tmp + +} + +func (s *_httpInputAuthentication) Basic(basic types.HttpInputBasicAuthenticationVariant) *_httpInputAuthentication { + + s.v.Basic = *basic.HttpInputBasicAuthenticationCaster() + + return s +} + +func (s *_httpInputAuthentication) HttpInputAuthenticationCaster() *types.HttpInputAuthentication { + return s.v +} diff --git a/typedapi/esdsl/httpinputbasicauthentication.go b/typedapi/esdsl/httpinputbasicauthentication.go new file mode 100644 index 0000000000..2290336193 --- /dev/null +++ b/typedapi/esdsl/httpinputbasicauthentication.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _httpInputBasicAuthentication struct { + v *types.HttpInputBasicAuthentication +} + +func NewHttpInputBasicAuthentication() *_httpInputBasicAuthentication { + + return &_httpInputBasicAuthentication{v: types.NewHttpInputBasicAuthentication()} + +} + +func (s *_httpInputBasicAuthentication) Password(password string) *_httpInputBasicAuthentication { + + s.v.Password = password + + return s +} + +func (s *_httpInputBasicAuthentication) Username(username string) *_httpInputBasicAuthentication { + + s.v.Username = username + + return s +} + +func (s *_httpInputBasicAuthentication) HttpInputBasicAuthenticationCaster() *types.HttpInputBasicAuthentication { + return s.v +} diff --git a/typedapi/esdsl/httpinputproxy.go b/typedapi/esdsl/httpinputproxy.go new file mode 100644 index 0000000000..4c0246a7c6 --- /dev/null +++ b/typedapi/esdsl/httpinputproxy.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _httpInputProxy struct { + v *types.HttpInputProxy +} + +func NewHttpInputProxy(port uint) *_httpInputProxy { + + tmp := &_httpInputProxy{v: types.NewHttpInputProxy()} + + tmp.Port(port) + + return tmp + +} + +func (s *_httpInputProxy) Host(host string) *_httpInputProxy { + + s.v.Host = host + + return s +} + +func (s *_httpInputProxy) Port(port uint) *_httpInputProxy { + + s.v.Port = port + + return s +} + +func (s *_httpInputProxy) HttpInputProxyCaster() *types.HttpInputProxy { + return s.v +} diff --git a/typedapi/esdsl/httpinputrequestdefinition.go b/typedapi/esdsl/httpinputrequestdefinition.go new file mode 100644 index 0000000000..aeaf9f2be6 --- /dev/null +++ b/typedapi/esdsl/httpinputrequestdefinition.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod" +) + +type _httpInputRequestDefinition struct { + v *types.HttpInputRequestDefinition +} + +func NewHttpInputRequestDefinition() *_httpInputRequestDefinition { + + return &_httpInputRequestDefinition{v: types.NewHttpInputRequestDefinition()} + +} + +func (s *_httpInputRequestDefinition) Auth(auth types.HttpInputAuthenticationVariant) *_httpInputRequestDefinition { + + s.v.Auth = auth.HttpInputAuthenticationCaster() + + return s +} + +func (s *_httpInputRequestDefinition) Body(body string) *_httpInputRequestDefinition { + + s.v.Body = &body + + return s +} + +func (s *_httpInputRequestDefinition) ConnectionTimeout(duration types.DurationVariant) *_httpInputRequestDefinition { + + s.v.ConnectionTimeout = *duration.DurationCaster() + + return s +} + +func (s *_httpInputRequestDefinition) Headers(headers map[string]string) *_httpInputRequestDefinition { + + s.v.Headers = headers + return s +} + +func (s *_httpInputRequestDefinition) AddHeader(key string, value string) *_httpInputRequestDefinition { + + var tmp map[string]string + if s.v.Headers == nil { + s.v.Headers = make(map[string]string) + } else { + tmp = s.v.Headers + } + + 
tmp[key] = value + + s.v.Headers = tmp + return s +} + +func (s *_httpInputRequestDefinition) Host(host string) *_httpInputRequestDefinition { + + s.v.Host = &host + + return s +} + +func (s *_httpInputRequestDefinition) Method(method httpinputmethod.HttpInputMethod) *_httpInputRequestDefinition { + + s.v.Method = &method + return s +} + +func (s *_httpInputRequestDefinition) Params(params map[string]string) *_httpInputRequestDefinition { + + s.v.Params = params + return s +} + +func (s *_httpInputRequestDefinition) AddParam(key string, value string) *_httpInputRequestDefinition { + + var tmp map[string]string + if s.v.Params == nil { + s.v.Params = make(map[string]string) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_httpInputRequestDefinition) Path(path string) *_httpInputRequestDefinition { + + s.v.Path = &path + + return s +} + +func (s *_httpInputRequestDefinition) Port(port uint) *_httpInputRequestDefinition { + + s.v.Port = &port + + return s +} + +func (s *_httpInputRequestDefinition) Proxy(proxy types.HttpInputProxyVariant) *_httpInputRequestDefinition { + + s.v.Proxy = proxy.HttpInputProxyCaster() + + return s +} + +func (s *_httpInputRequestDefinition) ReadTimeout(duration types.DurationVariant) *_httpInputRequestDefinition { + + s.v.ReadTimeout = *duration.DurationCaster() + + return s +} + +func (s *_httpInputRequestDefinition) Scheme(scheme connectionscheme.ConnectionScheme) *_httpInputRequestDefinition { + + s.v.Scheme = &scheme + return s +} + +func (s *_httpInputRequestDefinition) Url(url string) *_httpInputRequestDefinition { + + s.v.Url = &url + + return s +} + +func (s *_httpInputRequestDefinition) HttpInputRequestDefinitionCaster() *types.HttpInputRequestDefinition { + return s.v +} diff --git a/typedapi/esdsl/hungariananalyzer.go b/typedapi/esdsl/hungariananalyzer.go new file mode 100644 index 0000000000..ab51fde119 --- /dev/null +++ b/typedapi/esdsl/hungariananalyzer.go @@ -0,0 +1,61 @@ 
+// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hungarianAnalyzer struct { + v *types.HungarianAnalyzer +} + +func NewHungarianAnalyzer() *_hungarianAnalyzer { + + return &_hungarianAnalyzer{v: types.NewHungarianAnalyzer()} + +} + +func (s *_hungarianAnalyzer) StemExclusion(stemexclusions ...string) *_hungarianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_hungarianAnalyzer) Stopwords(stopwords ...string) *_hungarianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_hungarianAnalyzer) StopwordsPath(stopwordspath string) *_hungarianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_hungarianAnalyzer) HungarianAnalyzerCaster() *types.HungarianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/hunspelltokenfilter.go b/typedapi/esdsl/hunspelltokenfilter.go new file mode 100644 index 0000000000..8463e56aac --- /dev/null +++ 
b/typedapi/esdsl/hunspelltokenfilter.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hunspellTokenFilter struct { + v *types.HunspellTokenFilter +} + +func NewHunspellTokenFilter(locale string) *_hunspellTokenFilter { + + tmp := &_hunspellTokenFilter{v: types.NewHunspellTokenFilter()} + + tmp.Locale(locale) + + return tmp + +} + +func (s *_hunspellTokenFilter) Dedup(dedup bool) *_hunspellTokenFilter { + + s.v.Dedup = &dedup + + return s +} + +func (s *_hunspellTokenFilter) Dictionary(dictionary string) *_hunspellTokenFilter { + + s.v.Dictionary = &dictionary + + return s +} + +func (s *_hunspellTokenFilter) Locale(locale string) *_hunspellTokenFilter { + + s.v.Locale = locale + + return s +} + +func (s *_hunspellTokenFilter) LongestOnly(longestonly bool) *_hunspellTokenFilter { + + s.v.LongestOnly = &longestonly + + return s +} + +func (s *_hunspellTokenFilter) Version(versionstring string) *_hunspellTokenFilter { + + s.v.Version = 
&versionstring + + return s +} + +func (s *_hunspellTokenFilter) HunspellTokenFilterCaster() *types.HunspellTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/hyphenationdecompoundertokenfilter.go b/typedapi/esdsl/hyphenationdecompoundertokenfilter.go new file mode 100644 index 0000000000..425975f24c --- /dev/null +++ b/typedapi/esdsl/hyphenationdecompoundertokenfilter.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _hyphenationDecompounderTokenFilter struct { + v *types.HyphenationDecompounderTokenFilter +} + +func NewHyphenationDecompounderTokenFilter() *_hyphenationDecompounderTokenFilter { + + return &_hyphenationDecompounderTokenFilter{v: types.NewHyphenationDecompounderTokenFilter()} + +} + +func (s *_hyphenationDecompounderTokenFilter) HyphenationPatternsPath(hyphenationpatternspath string) *_hyphenationDecompounderTokenFilter { + + s.v.HyphenationPatternsPath = &hyphenationpatternspath + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) MaxSubwordSize(maxsubwordsize int) *_hyphenationDecompounderTokenFilter { + + s.v.MaxSubwordSize = &maxsubwordsize + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) MinSubwordSize(minsubwordsize int) *_hyphenationDecompounderTokenFilter { + + s.v.MinSubwordSize = &minsubwordsize + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) MinWordSize(minwordsize int) *_hyphenationDecompounderTokenFilter { + + s.v.MinWordSize = &minwordsize + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) OnlyLongestMatch(onlylongestmatch bool) *_hyphenationDecompounderTokenFilter { + + s.v.OnlyLongestMatch = &onlylongestmatch + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) Version(versionstring string) *_hyphenationDecompounderTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_hyphenationDecompounderTokenFilter) WordList(wordlists ...string) *_hyphenationDecompounderTokenFilter { + + for _, v := range wordlists { + + s.v.WordList = append(s.v.WordList, v) + + } + return s +} + +func (s *_hyphenationDecompounderTokenFilter) WordListPath(wordlistpath string) *_hyphenationDecompounderTokenFilter { + + s.v.WordListPath = &wordlistpath + + return s +} + +func (s 
*_hyphenationDecompounderTokenFilter) HyphenationDecompounderTokenFilterCaster() *types.HyphenationDecompounderTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/icuanalyzer.go b/typedapi/esdsl/icuanalyzer.go new file mode 100644 index 0000000000..db6941a423 --- /dev/null +++ b/typedapi/esdsl/icuanalyzer.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" +) + +type _icuAnalyzer struct { + v *types.IcuAnalyzer +} + +func NewIcuAnalyzer(method icunormalizationtype.IcuNormalizationType, mode icunormalizationmode.IcuNormalizationMode) *_icuAnalyzer { + + tmp := &_icuAnalyzer{v: types.NewIcuAnalyzer()} + + tmp.Method(method) + + tmp.Mode(mode) + + return tmp + +} + +func (s *_icuAnalyzer) Method(method icunormalizationtype.IcuNormalizationType) *_icuAnalyzer { + + s.v.Method = method + return s +} + +func (s *_icuAnalyzer) Mode(mode icunormalizationmode.IcuNormalizationMode) *_icuAnalyzer { + + s.v.Mode = mode + return s +} + +func (s *_icuAnalyzer) IcuAnalyzerCaster() *types.IcuAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/icucollationproperty.go b/typedapi/esdsl/icucollationproperty.go new file mode 100644 index 0000000000..a309d33959 --- /dev/null +++ b/typedapi/esdsl/icucollationproperty.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _icuCollationProperty struct { + v *types.IcuCollationProperty +} + +func NewIcuCollationProperty() *_icuCollationProperty { + + return &_icuCollationProperty{v: types.NewIcuCollationProperty()} + +} + +func (s *_icuCollationProperty) Alternate(alternate icucollationalternate.IcuCollationAlternate) *_icuCollationProperty { + + s.v.Alternate = &alternate + return s +} + +func (s *_icuCollationProperty) CaseFirst(casefirst icucollationcasefirst.IcuCollationCaseFirst) *_icuCollationProperty { + + s.v.CaseFirst = &casefirst + return s +} + +func (s *_icuCollationProperty) CaseLevel(caselevel bool) *_icuCollationProperty { + + s.v.CaseLevel = &caselevel + + return s +} + +func (s *_icuCollationProperty) CopyTo(fields ...string) *_icuCollationProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_icuCollationProperty) Country(country string) *_icuCollationProperty { + + s.v.Country = &country + + return s +} + +func (s *_icuCollationProperty) Decomposition(decomposition 
icucollationdecomposition.IcuCollationDecomposition) *_icuCollationProperty { + + s.v.Decomposition = &decomposition + return s +} + +func (s *_icuCollationProperty) DocValues(docvalues bool) *_icuCollationProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_icuCollationProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_icuCollationProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_icuCollationProperty) Fields(fields map[string]types.Property) *_icuCollationProperty { + + s.v.Fields = fields + return s +} + +func (s *_icuCollationProperty) AddField(key string, value types.PropertyVariant) *_icuCollationProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_icuCollationProperty) HiraganaQuaternaryMode(hiraganaquaternarymode bool) *_icuCollationProperty { + + s.v.HiraganaQuaternaryMode = &hiraganaquaternarymode + + return s +} + +func (s *_icuCollationProperty) IgnoreAbove(ignoreabove int) *_icuCollationProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Should the field be searchable? +func (s *_icuCollationProperty) Index(index bool) *_icuCollationProperty { + + s.v.Index = &index + + return s +} + +func (s *_icuCollationProperty) IndexOptions(indexoptions indexoptions.IndexOptions) *_icuCollationProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +func (s *_icuCollationProperty) Language(language string) *_icuCollationProperty { + + s.v.Language = &language + + return s +} + +// Metadata about the field. 
+func (s *_icuCollationProperty) Meta(meta map[string]string) *_icuCollationProperty { + + s.v.Meta = meta + return s +} + +func (s *_icuCollationProperty) AddMeta(key string, value string) *_icuCollationProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_icuCollationProperty) Norms(norms bool) *_icuCollationProperty { + + s.v.Norms = &norms + + return s +} + +// Accepts a string value which is substituted for any explicit null values. +// Defaults to null, which means the field is treated as missing. +func (s *_icuCollationProperty) NullValue(nullvalue string) *_icuCollationProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_icuCollationProperty) Numeric(numeric bool) *_icuCollationProperty { + + s.v.Numeric = &numeric + + return s +} + +func (s *_icuCollationProperty) Properties(properties map[string]types.Property) *_icuCollationProperty { + + s.v.Properties = properties + return s +} + +func (s *_icuCollationProperty) AddProperty(key string, value types.PropertyVariant) *_icuCollationProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_icuCollationProperty) Rules(rules string) *_icuCollationProperty { + + s.v.Rules = &rules + + return s +} + +func (s *_icuCollationProperty) Store(store bool) *_icuCollationProperty { + + s.v.Store = &store + + return s +} + +func (s *_icuCollationProperty) Strength(strength icucollationstrength.IcuCollationStrength) *_icuCollationProperty { + + s.v.Strength = &strength + return s +} + +func (s *_icuCollationProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_icuCollationProperty { + + s.v.SyntheticSourceKeep = 
&syntheticsourcekeep + return s +} + +func (s *_icuCollationProperty) VariableTop(variabletop string) *_icuCollationProperty { + + s.v.VariableTop = &variabletop + + return s +} + +func (s *_icuCollationProperty) Variant(variant string) *_icuCollationProperty { + + s.v.Variant = &variant + + return s +} + +func (s *_icuCollationProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_icuCollationProperty) IcuCollationPropertyCaster() *types.IcuCollationProperty { + return s.v +} diff --git a/typedapi/esdsl/icucollationtokenfilter.go b/typedapi/esdsl/icucollationtokenfilter.go new file mode 100644 index 0000000000..2c9914373e --- /dev/null +++ b/typedapi/esdsl/icucollationtokenfilter.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength" +) + +type _icuCollationTokenFilter struct { + v *types.IcuCollationTokenFilter +} + +func NewIcuCollationTokenFilter() *_icuCollationTokenFilter { + + return &_icuCollationTokenFilter{v: types.NewIcuCollationTokenFilter()} + +} + +func (s *_icuCollationTokenFilter) Alternate(alternate icucollationalternate.IcuCollationAlternate) *_icuCollationTokenFilter { + + s.v.Alternate = &alternate + return s +} + +func (s *_icuCollationTokenFilter) CaseFirst(casefirst icucollationcasefirst.IcuCollationCaseFirst) *_icuCollationTokenFilter { + + s.v.CaseFirst = &casefirst + return s +} + +func (s *_icuCollationTokenFilter) CaseLevel(caselevel bool) *_icuCollationTokenFilter { + + s.v.CaseLevel = &caselevel + + return s +} + +func (s *_icuCollationTokenFilter) Country(country string) *_icuCollationTokenFilter { + + s.v.Country = &country + + return s +} + +func (s *_icuCollationTokenFilter) Decomposition(decomposition icucollationdecomposition.IcuCollationDecomposition) *_icuCollationTokenFilter { + + s.v.Decomposition = &decomposition + return s +} + +func (s *_icuCollationTokenFilter) HiraganaQuaternaryMode(hiraganaquaternarymode bool) *_icuCollationTokenFilter { + + s.v.HiraganaQuaternaryMode = &hiraganaquaternarymode + + return s +} + +func (s *_icuCollationTokenFilter) Language(language string) *_icuCollationTokenFilter { + + s.v.Language = &language + + return s +} + +func (s *_icuCollationTokenFilter) Numeric(numeric bool) 
*_icuCollationTokenFilter { + + s.v.Numeric = &numeric + + return s +} + +func (s *_icuCollationTokenFilter) Rules(rules string) *_icuCollationTokenFilter { + + s.v.Rules = &rules + + return s +} + +func (s *_icuCollationTokenFilter) Strength(strength icucollationstrength.IcuCollationStrength) *_icuCollationTokenFilter { + + s.v.Strength = &strength + return s +} + +func (s *_icuCollationTokenFilter) VariableTop(variabletop string) *_icuCollationTokenFilter { + + s.v.VariableTop = &variabletop + + return s +} + +func (s *_icuCollationTokenFilter) Variant(variant string) *_icuCollationTokenFilter { + + s.v.Variant = &variant + + return s +} + +func (s *_icuCollationTokenFilter) Version(versionstring string) *_icuCollationTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuCollationTokenFilter) IcuCollationTokenFilterCaster() *types.IcuCollationTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/icufoldingtokenfilter.go b/typedapi/esdsl/icufoldingtokenfilter.go new file mode 100644 index 0000000000..bd0d9444ff --- /dev/null +++ b/typedapi/esdsl/icufoldingtokenfilter.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _icuFoldingTokenFilter struct { + v *types.IcuFoldingTokenFilter +} + +func NewIcuFoldingTokenFilter(unicodesetfilter string) *_icuFoldingTokenFilter { + + tmp := &_icuFoldingTokenFilter{v: types.NewIcuFoldingTokenFilter()} + + tmp.UnicodeSetFilter(unicodesetfilter) + + return tmp + +} + +func (s *_icuFoldingTokenFilter) UnicodeSetFilter(unicodesetfilter string) *_icuFoldingTokenFilter { + + s.v.UnicodeSetFilter = unicodesetfilter + + return s +} + +func (s *_icuFoldingTokenFilter) Version(versionstring string) *_icuFoldingTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuFoldingTokenFilter) IcuFoldingTokenFilterCaster() *types.IcuFoldingTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/icunormalizationcharfilter.go b/typedapi/esdsl/icunormalizationcharfilter.go new file mode 100644 index 0000000000..7c9ad32527 --- /dev/null +++ b/typedapi/esdsl/icunormalizationcharfilter.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" +) + +type _icuNormalizationCharFilter struct { + v *types.IcuNormalizationCharFilter +} + +func NewIcuNormalizationCharFilter() *_icuNormalizationCharFilter { + + return &_icuNormalizationCharFilter{v: types.NewIcuNormalizationCharFilter()} + +} + +func (s *_icuNormalizationCharFilter) Mode(mode icunormalizationmode.IcuNormalizationMode) *_icuNormalizationCharFilter { + + s.v.Mode = &mode + return s +} + +func (s *_icuNormalizationCharFilter) Name(name icunormalizationtype.IcuNormalizationType) *_icuNormalizationCharFilter { + + s.v.Name = &name + return s +} + +func (s *_icuNormalizationCharFilter) Version(versionstring string) *_icuNormalizationCharFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuNormalizationCharFilter) IcuNormalizationCharFilterCaster() *types.IcuNormalizationCharFilter { + return s.v +} diff --git a/typedapi/esdsl/icunormalizationtokenfilter.go b/typedapi/esdsl/icunormalizationtokenfilter.go new file mode 100644 index 0000000000..d57e430c96 --- /dev/null +++ b/typedapi/esdsl/icunormalizationtokenfilter.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" +) + +type _icuNormalizationTokenFilter struct { + v *types.IcuNormalizationTokenFilter +} + +func NewIcuNormalizationTokenFilter(name icunormalizationtype.IcuNormalizationType) *_icuNormalizationTokenFilter { + + tmp := &_icuNormalizationTokenFilter{v: types.NewIcuNormalizationTokenFilter()} + + tmp.Name(name) + + return tmp + +} + +func (s *_icuNormalizationTokenFilter) Name(name icunormalizationtype.IcuNormalizationType) *_icuNormalizationTokenFilter { + + s.v.Name = name + return s +} + +func (s *_icuNormalizationTokenFilter) Version(versionstring string) *_icuNormalizationTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuNormalizationTokenFilter) IcuNormalizationTokenFilterCaster() *types.IcuNormalizationTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/icutokenizer.go b/typedapi/esdsl/icutokenizer.go new file mode 100644 index 0000000000..596f7796cd --- /dev/null +++ b/typedapi/esdsl/icutokenizer.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _icuTokenizer struct { + v *types.IcuTokenizer +} + +func NewIcuTokenizer(rulefiles string) *_icuTokenizer { + + tmp := &_icuTokenizer{v: types.NewIcuTokenizer()} + + tmp.RuleFiles(rulefiles) + + return tmp + +} + +func (s *_icuTokenizer) RuleFiles(rulefiles string) *_icuTokenizer { + + s.v.RuleFiles = rulefiles + + return s +} + +func (s *_icuTokenizer) Version(versionstring string) *_icuTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuTokenizer) IcuTokenizerCaster() *types.IcuTokenizer { + return s.v +} diff --git a/typedapi/esdsl/icutransformtokenfilter.go b/typedapi/esdsl/icutransformtokenfilter.go new file mode 100644 index 0000000000..deb41bc427 --- /dev/null +++ b/typedapi/esdsl/icutransformtokenfilter.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection" +) + +type _icuTransformTokenFilter struct { + v *types.IcuTransformTokenFilter +} + +func NewIcuTransformTokenFilter(id string) *_icuTransformTokenFilter { + + tmp := &_icuTransformTokenFilter{v: types.NewIcuTransformTokenFilter()} + + tmp.Id(id) + + return tmp + +} + +func (s *_icuTransformTokenFilter) Dir(dir icutransformdirection.IcuTransformDirection) *_icuTransformTokenFilter { + + s.v.Dir = &dir + return s +} + +func (s *_icuTransformTokenFilter) Id(id string) *_icuTransformTokenFilter { + + s.v.Id = id + + return s +} + +func (s *_icuTransformTokenFilter) Version(versionstring string) *_icuTransformTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_icuTransformTokenFilter) IcuTransformTokenFilterCaster() *types.IcuTransformTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/ids.go b/typedapi/esdsl/ids.go new file mode 100644 index 0000000000..c9d39da7d0 --- /dev/null +++ b/typedapi/esdsl/ids.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _ids struct { + v types.Ids +} + +func NewIds() *_ids { + return &_ids{v: []string{}} +} + +func (u *_ids) IdsCaster() *types.Ids { + return &u.v +} diff --git a/typedapi/esdsl/idsquery.go b/typedapi/esdsl/idsquery.go new file mode 100644 index 0000000000..a19c71dd43 --- /dev/null +++ b/typedapi/esdsl/idsquery.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _idsQuery struct { + v *types.IdsQuery +} + +// Returns roles based on their IDs. +// This query uses role document IDs stored in the `_id` field. +func NewIdsQuery() *_idsQuery { + + return &_idsQuery{v: types.NewIdsQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_idsQuery) Boost(boost float32) *_idsQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_idsQuery) QueryName_(queryname_ string) *_idsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// An array of document IDs. 
+func (s *_idsQuery) Values(ids ...string) *_idsQuery { + + s.v.Values = ids + + return s +} + +func (s *_idsQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Ids = s.v + + return container +} + +func (s *_idsQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.Ids = s.v + + return container +} + +func (s *_idsQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.Ids = s.v + + return container +} + +func (s *_idsQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.Ids = s.v + + return container +} + +func (s *_idsQuery) IdsQueryCaster() *types.IdsQuery { + return s.v +} diff --git a/typedapi/esdsl/ilmactions.go b/typedapi/esdsl/ilmactions.go new file mode 100644 index 0000000000..eb6b25aca7 --- /dev/null +++ b/typedapi/esdsl/ilmactions.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ilmActions struct { + v *types.IlmActions +} + +func NewIlmActions() *_ilmActions { + + return &_ilmActions{v: types.NewIlmActions()} + +} + +// Phases allowed: warm, cold. +func (s *_ilmActions) Allocate(allocate types.AllocateActionVariant) *_ilmActions { + + s.v.Allocate = allocate.AllocateActionCaster() + + return s +} + +// Phases allowed: delete. +func (s *_ilmActions) Delete(delete types.DeleteActionVariant) *_ilmActions { + + s.v.Delete = delete.DeleteActionCaster() + + return s +} + +// Phases allowed: hot, warm, cold. +func (s *_ilmActions) Downsample(downsample types.DownsampleActionVariant) *_ilmActions { + + s.v.Downsample = downsample.DownsampleActionCaster() + + return s +} + +// Phases allowed: hot, warm. +func (s *_ilmActions) Forcemerge(forcemerge types.ForceMergeActionVariant) *_ilmActions { + + s.v.Forcemerge = forcemerge.ForceMergeActionCaster() + + return s +} + +// The freeze action is a noop in 8.x +func (s *_ilmActions) Freeze(freeze types.EmptyObjectVariant) *_ilmActions { + + s.v.Freeze = freeze.EmptyObjectCaster() + + return s +} + +// Phases allowed: warm, cold. +func (s *_ilmActions) Migrate(migrate types.MigrateActionVariant) *_ilmActions { + + s.v.Migrate = migrate.MigrateActionCaster() + + return s +} + +// Phases allowed: hot, warm, cold. +func (s *_ilmActions) Readonly(readonly types.EmptyObjectVariant) *_ilmActions { + + s.v.Readonly = readonly.EmptyObjectCaster() + + return s +} + +// Phases allowed: hot. +func (s *_ilmActions) Rollover(rollover types.RolloverActionVariant) *_ilmActions { + + s.v.Rollover = rollover.RolloverActionCaster() + + return s +} + +// Phases allowed: hot, cold, frozen. 
+func (s *_ilmActions) SearchableSnapshot(searchablesnapshot types.SearchableSnapshotActionVariant) *_ilmActions { + + s.v.SearchableSnapshot = searchablesnapshot.SearchableSnapshotActionCaster() + + return s +} + +// Phases allowed: hot, warm, cold. +func (s *_ilmActions) SetPriority(setpriority types.SetPriorityActionVariant) *_ilmActions { + + s.v.SetPriority = setpriority.SetPriorityActionCaster() + + return s +} + +// Phases allowed: hot, warm. +func (s *_ilmActions) Shrink(shrink types.ShrinkActionVariant) *_ilmActions { + + s.v.Shrink = shrink.ShrinkActionCaster() + + return s +} + +// Phases allowed: hot, warm, cold, frozen. +func (s *_ilmActions) Unfollow(unfollow types.EmptyObjectVariant) *_ilmActions { + + s.v.Unfollow = unfollow.EmptyObjectCaster() + + return s +} + +// Phases allowed: delete. +func (s *_ilmActions) WaitForSnapshot(waitforsnapshot types.WaitForSnapshotActionVariant) *_ilmActions { + + s.v.WaitForSnapshot = waitforsnapshot.WaitForSnapshotActionCaster() + + return s +} + +func (s *_ilmActions) IlmActionsCaster() *types.IlmActions { + return s.v +} diff --git a/typedapi/esdsl/ilmpolicy.go b/typedapi/esdsl/ilmpolicy.go new file mode 100644 index 0000000000..b8e0dce0b0 --- /dev/null +++ b/typedapi/esdsl/ilmpolicy.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ilmPolicy struct { + v *types.IlmPolicy +} + +func NewIlmPolicy(phases types.PhasesVariant) *_ilmPolicy { + + tmp := &_ilmPolicy{v: types.NewIlmPolicy()} + + tmp.Phases(phases) + + return tmp + +} + +// Arbitrary metadata that is not automatically generated or used by +// Elasticsearch. +func (s *_ilmPolicy) Meta_(metadata types.MetadataVariant) *_ilmPolicy { + + s.v.Meta_ = *metadata.MetadataCaster() + + return s +} + +func (s *_ilmPolicy) Phases(phases types.PhasesVariant) *_ilmPolicy { + + s.v.Phases = *phases.PhasesCaster() + + return s +} + +func (s *_ilmPolicy) IlmPolicyCaster() *types.IlmPolicy { + return s.v +} diff --git a/typedapi/esdsl/includedinvalidation.go b/typedapi/esdsl/includedinvalidation.go new file mode 100644 index 0000000000..31a5287628 --- /dev/null +++ b/typedapi/esdsl/includedinvalidation.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _includedInValidation struct { + v *types.IncludedInValidation +} + +func NewIncludedInValidation() *_includedInValidation { + + return &_includedInValidation{v: types.NewIncludedInValidation()} + +} + +func (s *_includedInValidation) Constraint(constraints ...types.ScalarValueVariant) *_includedInValidation { + + for _, v := range constraints { + + s.v.Constraint = append(s.v.Constraint, *v.ScalarValueCaster()) + + } + return s +} + +func (s *_includedInValidation) IncludedInValidationCaster() *types.IncludedInValidation { + return s.v +} diff --git a/typedapi/esdsl/indexaction.go b/typedapi/esdsl/indexaction.go new file mode 100644 index 0000000000..0baa88fcee --- /dev/null +++ b/typedapi/esdsl/indexaction.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" +) + +type _indexAction struct { + v *types.IndexAction +} + +func NewIndexAction() *_indexAction { + + return &_indexAction{v: types.NewIndexAction()} + +} + +func (s *_indexAction) DocId(id string) *_indexAction { + + s.v.DocId = &id + + return s +} + +func (s *_indexAction) ExecutionTimeField(field string) *_indexAction { + + s.v.ExecutionTimeField = &field + + return s +} + +func (s *_indexAction) Index(indexname string) *_indexAction { + + s.v.Index = indexname + + return s +} + +func (s *_indexAction) OpType(optype optype.OpType) *_indexAction { + + s.v.OpType = &optype + return s +} + +func (s *_indexAction) Refresh(refresh refresh.Refresh) *_indexAction { + + s.v.Refresh = &refresh + return s +} + +func (s *_indexAction) Timeout(duration types.DurationVariant) *_indexAction { + + s.v.Timeout = *duration.DurationCaster() + + return s +} + +func (s *_indexAction) IndexActionCaster() *types.IndexAction { + return s.v +} diff --git a/typedapi/esdsl/indexanddatastreamaction.go b/typedapi/esdsl/indexanddatastreamaction.go new file mode 100644 index 0000000000..f068b01768 --- /dev/null +++ b/typedapi/esdsl/indexanddatastreamaction.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexAndDataStreamAction struct { + v *types.IndexAndDataStreamAction +} + +// Removes a backing index from a data stream. +// The index is unhidden as part of this operation. +// A data stream’s write index cannot be removed. +func NewIndexAndDataStreamAction() *_indexAndDataStreamAction { + + return &_indexAndDataStreamAction{v: types.NewIndexAndDataStreamAction()} + +} + +// Data stream targeted by the action. +func (s *_indexAndDataStreamAction) DataStream(datastreamname string) *_indexAndDataStreamAction { + + s.v.DataStream = datastreamname + + return s +} + +// Index for the action. +func (s *_indexAndDataStreamAction) Index(indexname string) *_indexAndDataStreamAction { + + s.v.Index = indexname + + return s +} + +func (s *_indexAndDataStreamAction) IndicesModifyActionCaster() *types.IndicesModifyAction { + container := types.NewIndicesModifyAction() + + container.RemoveBackingIndex = s.v + + return container +} + +func (s *_indexAndDataStreamAction) IndexAndDataStreamActionCaster() *types.IndexAndDataStreamAction { + return s.v +} diff --git a/typedapi/esdsl/indexfield.go b/typedapi/esdsl/indexfield.go new file mode 100644 index 0000000000..421d9dfff2 --- /dev/null +++ b/typedapi/esdsl/indexfield.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexField struct { + v *types.IndexField +} + +func NewIndexField(enabled bool) *_indexField { + + tmp := &_indexField{v: types.NewIndexField()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_indexField) Enabled(enabled bool) *_indexField { + + s.v.Enabled = enabled + + return s +} + +func (s *_indexField) IndexFieldCaster() *types.IndexField { + return s.v +} diff --git a/typedapi/esdsl/indexingslowlogsettings.go b/typedapi/esdsl/indexingslowlogsettings.go new file mode 100644 index 0000000000..8b770806d6 --- /dev/null +++ b/typedapi/esdsl/indexingslowlogsettings.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexingSlowlogSettings struct { + v *types.IndexingSlowlogSettings +} + +func NewIndexingSlowlogSettings() *_indexingSlowlogSettings { + + return &_indexingSlowlogSettings{v: types.NewIndexingSlowlogSettings()} + +} + +func (s *_indexingSlowlogSettings) Level(level string) *_indexingSlowlogSettings { + + s.v.Level = &level + + return s +} + +func (s *_indexingSlowlogSettings) Reformat(reformat bool) *_indexingSlowlogSettings { + + s.v.Reformat = &reformat + + return s +} + +func (s *_indexingSlowlogSettings) Source(source int) *_indexingSlowlogSettings { + + s.v.Source = &source + + return s +} + +func (s *_indexingSlowlogSettings) Threshold(threshold types.IndexingSlowlogTresholdsVariant) *_indexingSlowlogSettings { + + s.v.Threshold = threshold.IndexingSlowlogTresholdsCaster() + + return s +} + +func (s *_indexingSlowlogSettings) IndexingSlowlogSettingsCaster() *types.IndexingSlowlogSettings { + return s.v +} diff --git a/typedapi/esdsl/indexingslowlogtresholds.go b/typedapi/esdsl/indexingslowlogtresholds.go new file mode 100644 index 0000000000..a8610a0139 --- /dev/null +++ b/typedapi/esdsl/indexingslowlogtresholds.go @@ -0,0 
+1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexingSlowlogTresholds struct { + v *types.IndexingSlowlogTresholds +} + +func NewIndexingSlowlogTresholds() *_indexingSlowlogTresholds { + + return &_indexingSlowlogTresholds{v: types.NewIndexingSlowlogTresholds()} + +} + +// The indexing slow log, similar in functionality to the search slow log. The +// log file name ends with `_index_indexing_slowlog.json`. +// Log and the thresholds are configured in the same way as the search slowlog. 
+func (s *_indexingSlowlogTresholds) Index(index types.SlowlogTresholdLevelsVariant) *_indexingSlowlogTresholds { + + s.v.Index = index.SlowlogTresholdLevelsCaster() + + return s +} + +func (s *_indexingSlowlogTresholds) IndexingSlowlogTresholdsCaster() *types.IndexingSlowlogTresholds { + return s.v +} diff --git a/typedapi/esdsl/indexoperation.go b/typedapi/esdsl/indexoperation.go new file mode 100644 index 0000000000..bfc103c72a --- /dev/null +++ b/typedapi/esdsl/indexoperation.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _indexOperation struct { + v *types.IndexOperation +} + +// Index the specified document. +// If the document exists, it replaces the document and increments the version. +// The following line must contain the source data to be indexed. 
+func NewIndexOperation() *_indexOperation { + + return &_indexOperation{v: types.NewIndexOperation()} + +} + +// A map from the full name of fields to the name of dynamic templates. +// It defaults to an empty map. +// If a name matches a dynamic template, that template will be applied +// regardless of other match predicates defined in the template. +// If a field is already defined in the mapping, then this parameter won't be +// used. +func (s *_indexOperation) DynamicTemplates(dynamictemplates map[string]string) *_indexOperation { + + s.v.DynamicTemplates = dynamictemplates + return s +} + +func (s *_indexOperation) AddDynamicTemplate(key string, value string) *_indexOperation { + + var tmp map[string]string + if s.v.DynamicTemplates == nil { + s.v.DynamicTemplates = make(map[string]string) + } else { + tmp = s.v.DynamicTemplates + } + + tmp[key] = value + + s.v.DynamicTemplates = tmp + return s +} + +// The document ID. +func (s *_indexOperation) Id_(id string) *_indexOperation { + + s.v.Id_ = &id + + return s +} + +func (s *_indexOperation) IfPrimaryTerm(ifprimaryterm int64) *_indexOperation { + + s.v.IfPrimaryTerm = &ifprimaryterm + + return s +} + +func (s *_indexOperation) IfSeqNo(sequencenumber int64) *_indexOperation { + + s.v.IfSeqNo = &sequencenumber + + return s +} + +// The name of the index or index alias to perform the action on. +func (s *_indexOperation) Index_(indexname string) *_indexOperation { + + s.v.Index_ = &indexname + + return s +} + +// The ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, setting the value to +// `_none` turns off the default ingest pipeline for this request. +// If a final pipeline is configured, it will always run regardless of the value +// of this parameter. +func (s *_indexOperation) Pipeline(pipeline string) *_indexOperation { + + s.v.Pipeline = &pipeline + + return s +} + +// If `true`, the request's actions must target an index alias. 
+func (s *_indexOperation) RequireAlias(requirealias bool) *_indexOperation { + + s.v.RequireAlias = &requirealias + + return s +} + +// A custom value used to route operations to a specific shard. +func (s *_indexOperation) Routing(routing string) *_indexOperation { + + s.v.Routing = &routing + + return s +} + +func (s *_indexOperation) Version(versionnumber int64) *_indexOperation { + + s.v.Version = &versionnumber + + return s +} + +func (s *_indexOperation) VersionType(versiontype versiontype.VersionType) *_indexOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_indexOperation) OperationContainerCaster() *types.OperationContainer { + container := types.NewOperationContainer() + + container.Index = s.v + + return container +} + +func (s *_indexOperation) IndexOperationCaster() *types.IndexOperation { + return s.v +} diff --git a/typedapi/esdsl/indexprivilegescheck.go b/typedapi/esdsl/indexprivilegescheck.go new file mode 100644 index 0000000000..df60ad4262 --- /dev/null +++ b/typedapi/esdsl/indexprivilegescheck.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +type _indexPrivilegesCheck struct { + v *types.IndexPrivilegesCheck +} + +func NewIndexPrivilegesCheck() *_indexPrivilegesCheck { + + return &_indexPrivilegesCheck{v: types.NewIndexPrivilegesCheck()} + +} + +// This needs to be set to `true` (default is `false`) if using wildcards or +// regexps for patterns that cover restricted indices. +// Implicitly, restricted indices do not match index patterns because restricted +// indices usually have limited privileges and including them in pattern tests +// would render most such tests false. +// If restricted indices are explicitly included in the names list, privileges +// will be checked against them regardless of the value of +// `allow_restricted_indices`. +func (s *_indexPrivilegesCheck) AllowRestrictedIndices(allowrestrictedindices bool) *_indexPrivilegesCheck { + + s.v.AllowRestrictedIndices = &allowrestrictedindices + + return s +} + +// A list of indices. +func (s *_indexPrivilegesCheck) Names(indices ...string) *_indexPrivilegesCheck { + + s.v.Names = indices + + return s +} + +// A list of the privileges that you want to check for the specified indices. +func (s *_indexPrivilegesCheck) Privileges(privileges ...indexprivilege.IndexPrivilege) *_indexPrivilegesCheck { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +func (s *_indexPrivilegesCheck) IndexPrivilegesCheckCaster() *types.IndexPrivilegesCheck { + return s.v +} diff --git a/typedapi/esdsl/indexrouting.go b/typedapi/esdsl/indexrouting.go new file mode 100644 index 0000000000..f44a5ee711 --- /dev/null +++ b/typedapi/esdsl/indexrouting.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexRouting struct { + v *types.IndexRouting +} + +func NewIndexRouting() *_indexRouting { + + return &_indexRouting{v: types.NewIndexRouting()} + +} + +func (s *_indexRouting) Allocation(allocation types.IndexRoutingAllocationVariant) *_indexRouting { + + s.v.Allocation = allocation.IndexRoutingAllocationCaster() + + return s +} + +func (s *_indexRouting) Rebalance(rebalance types.IndexRoutingRebalanceVariant) *_indexRouting { + + s.v.Rebalance = rebalance.IndexRoutingRebalanceCaster() + + return s +} + +func (s *_indexRouting) IndexRoutingCaster() *types.IndexRouting { + return s.v +} diff --git a/typedapi/esdsl/indexroutingallocation.go b/typedapi/esdsl/indexroutingallocation.go new file mode 100644 index 0000000000..703fe26731 --- /dev/null +++ b/typedapi/esdsl/indexroutingallocation.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions" +) + +type _indexRoutingAllocation struct { + v *types.IndexRoutingAllocation +} + +func NewIndexRoutingAllocation() *_indexRoutingAllocation { + + return &_indexRoutingAllocation{v: types.NewIndexRoutingAllocation()} + +} + +func (s *_indexRoutingAllocation) Disk(disk types.IndexRoutingAllocationDiskVariant) *_indexRoutingAllocation { + + s.v.Disk = disk.IndexRoutingAllocationDiskCaster() + + return s +} + +func (s *_indexRoutingAllocation) Enable(enable indexroutingallocationoptions.IndexRoutingAllocationOptions) *_indexRoutingAllocation { + + s.v.Enable = &enable + return s +} + +func (s *_indexRoutingAllocation) Include(include types.IndexRoutingAllocationIncludeVariant) *_indexRoutingAllocation { + + s.v.Include = include.IndexRoutingAllocationIncludeCaster() + + return s +} + +func (s *_indexRoutingAllocation) InitialRecovery(initialrecovery types.IndexRoutingAllocationInitialRecoveryVariant) 
*_indexRoutingAllocation { + + s.v.InitialRecovery = initialrecovery.IndexRoutingAllocationInitialRecoveryCaster() + + return s +} + +func (s *_indexRoutingAllocation) IndexRoutingAllocationCaster() *types.IndexRoutingAllocation { + return s.v +} diff --git a/typedapi/esdsl/indexroutingallocationdisk.go b/typedapi/esdsl/indexroutingallocationdisk.go new file mode 100644 index 0000000000..d38935231d --- /dev/null +++ b/typedapi/esdsl/indexroutingallocationdisk.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexRoutingAllocationDisk struct { + v *types.IndexRoutingAllocationDisk +} + +func NewIndexRoutingAllocationDisk() *_indexRoutingAllocationDisk { + + return &_indexRoutingAllocationDisk{v: types.NewIndexRoutingAllocationDisk()} + +} + +func (s *_indexRoutingAllocationDisk) ThresholdEnabled(thresholdenabled string) *_indexRoutingAllocationDisk { + + s.v.ThresholdEnabled = thresholdenabled + + return s +} + +func (s *_indexRoutingAllocationDisk) IndexRoutingAllocationDiskCaster() *types.IndexRoutingAllocationDisk { + return s.v +} diff --git a/typedapi/esdsl/indexroutingallocationinclude.go b/typedapi/esdsl/indexroutingallocationinclude.go new file mode 100644 index 0000000000..e1ff9650a3 --- /dev/null +++ b/typedapi/esdsl/indexroutingallocationinclude.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexRoutingAllocationInclude struct { + v *types.IndexRoutingAllocationInclude +} + +func NewIndexRoutingAllocationInclude() *_indexRoutingAllocationInclude { + + return &_indexRoutingAllocationInclude{v: types.NewIndexRoutingAllocationInclude()} + +} + +func (s *_indexRoutingAllocationInclude) Id_(id string) *_indexRoutingAllocationInclude { + + s.v.Id_ = &id + + return s +} + +func (s *_indexRoutingAllocationInclude) TierPreference_(tierpreference_ string) *_indexRoutingAllocationInclude { + + s.v.TierPreference_ = &tierpreference_ + + return s +} + +func (s *_indexRoutingAllocationInclude) IndexRoutingAllocationIncludeCaster() *types.IndexRoutingAllocationInclude { + return s.v +} diff --git a/typedapi/esdsl/indexroutingallocationinitialrecovery.go b/typedapi/esdsl/indexroutingallocationinitialrecovery.go new file mode 100644 index 0000000000..35511979b6 --- /dev/null +++ b/typedapi/esdsl/indexroutingallocationinitialrecovery.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexRoutingAllocationInitialRecovery struct { + v *types.IndexRoutingAllocationInitialRecovery +} + +func NewIndexRoutingAllocationInitialRecovery() *_indexRoutingAllocationInitialRecovery { + + return &_indexRoutingAllocationInitialRecovery{v: types.NewIndexRoutingAllocationInitialRecovery()} + +} + +func (s *_indexRoutingAllocationInitialRecovery) Id_(id string) *_indexRoutingAllocationInitialRecovery { + + s.v.Id_ = &id + + return s +} + +func (s *_indexRoutingAllocationInitialRecovery) IndexRoutingAllocationInitialRecoveryCaster() *types.IndexRoutingAllocationInitialRecovery { + return s.v +} diff --git a/typedapi/esdsl/indexroutingrebalance.go b/typedapi/esdsl/indexroutingrebalance.go new file mode 100644 index 0000000000..fb6a4cb020 --- /dev/null +++ b/typedapi/esdsl/indexroutingrebalance.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions" +) + +type _indexRoutingRebalance struct { + v *types.IndexRoutingRebalance +} + +func NewIndexRoutingRebalance(enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions) *_indexRoutingRebalance { + + tmp := &_indexRoutingRebalance{v: types.NewIndexRoutingRebalance()} + + tmp.Enable(enable) + + return tmp + +} + +func (s *_indexRoutingRebalance) Enable(enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions) *_indexRoutingRebalance { + + s.v.Enable = enable + return s +} + +func (s *_indexRoutingRebalance) IndexRoutingRebalanceCaster() *types.IndexRoutingRebalance { + return s.v +} diff --git a/typedapi/esdsl/indexsegmentsort.go b/typedapi/esdsl/indexsegmentsort.go new file mode 100644 index 0000000000..49cec404e1 --- /dev/null +++ b/typedapi/esdsl/indexsegmentsort.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder" +) + +type _indexSegmentSort struct { + v *types.IndexSegmentSort +} + +func NewIndexSegmentSort() *_indexSegmentSort { + + return &_indexSegmentSort{v: types.NewIndexSegmentSort()} + +} + +func (s *_indexSegmentSort) Field(fields ...string) *_indexSegmentSort { + + s.v.Field = fields + + return s +} + +func (s *_indexSegmentSort) Missing(missings ...segmentsortmissing.SegmentSortMissing) *_indexSegmentSort { + + s.v.Missing = make([]segmentsortmissing.SegmentSortMissing, len(missings)) + s.v.Missing = missings + + return s +} + +func (s *_indexSegmentSort) Mode(modes ...segmentsortmode.SegmentSortMode) *_indexSegmentSort { + + s.v.Mode = make([]segmentsortmode.SegmentSortMode, len(modes)) + s.v.Mode = modes + + return s +} + +func (s *_indexSegmentSort) Order(orders ...segmentsortorder.SegmentSortOrder) *_indexSegmentSort { + + s.v.Order = make([]segmentsortorder.SegmentSortOrder, len(orders)) + s.v.Order = orders + + return s +} + +func (s *_indexSegmentSort) IndexSegmentSortCaster() *types.IndexSegmentSort { + return s.v +} diff --git a/typedapi/esdsl/indexsettingblocks.go b/typedapi/esdsl/indexsettingblocks.go new file mode 100644 index 0000000000..b8735b4084 --- /dev/null +++ b/typedapi/esdsl/indexsettingblocks.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexSettingBlocks struct { + v *types.IndexSettingBlocks +} + +func NewIndexSettingBlocks() *_indexSettingBlocks { + + return &_indexSettingBlocks{v: types.NewIndexSettingBlocks()} + +} + +func (s *_indexSettingBlocks) Metadata(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingBlocks { + + s.v.Metadata = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_indexSettingBlocks) Read(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingBlocks { + + s.v.Read = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_indexSettingBlocks) ReadOnly(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingBlocks { + + s.v.ReadOnly = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_indexSettingBlocks) ReadOnlyAllowDelete(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingBlocks { + + s.v.ReadOnlyAllowDelete = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_indexSettingBlocks) Write(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingBlocks { + + s.v.Write = 
*stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_indexSettingBlocks) IndexSettingBlocksCaster() *types.IndexSettingBlocks { + return s.v +} diff --git a/typedapi/esdsl/indexsettings.go b/typedapi/esdsl/indexsettings.go new file mode 100644 index 0000000000..ef3de33da6 --- /dev/null +++ b/typedapi/esdsl/indexsettings.go @@ -0,0 +1,477 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup" +) + +type _indexSettings struct { + v *types.IndexSettings +} + +func NewIndexSettings() *_indexSettings { + + return &_indexSettings{v: types.NewIndexSettings()} + +} + +func (s *_indexSettings) Analysis(analysis types.IndexSettingsAnalysisVariant) *_indexSettings { + + s.v.Analysis = analysis.IndexSettingsAnalysisCaster() + + return s +} + +// Settings to define analyzers, tokenizers, token filters and character +// filters. 
+func (s *_indexSettings) Analyze(analyze types.SettingsAnalyzeVariant) *_indexSettings { + + s.v.Analyze = analyze.SettingsAnalyzeCaster() + + return s +} + +func (s *_indexSettings) AutoExpandReplicas(autoexpandreplicas any) *_indexSettings { + + s.v.AutoExpandReplicas = autoexpandreplicas + + return s +} + +func (s *_indexSettings) Blocks(blocks types.IndexSettingBlocksVariant) *_indexSettings { + + s.v.Blocks = blocks.IndexSettingBlocksCaster() + + return s +} + +func (s *_indexSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *_indexSettings { + + s.v.CheckOnStartup = &checkonstartup + return s +} + +func (s *_indexSettings) Codec(codec string) *_indexSettings { + + s.v.Codec = &codec + + return s +} + +func (s *_indexSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillisVariant) *_indexSettings { + + s.v.CreationDate = *stringifiedepochtimeunitmillis.StringifiedEpochTimeUnitMillisCaster() + + return s +} + +func (s *_indexSettings) CreationDateString(datetime types.DateTimeVariant) *_indexSettings { + + s.v.CreationDateString = *datetime.DateTimeCaster() + + return s +} + +func (s *_indexSettings) DefaultPipeline(pipelinename string) *_indexSettings { + + s.v.DefaultPipeline = &pipelinename + + return s +} + +func (s *_indexSettings) FinalPipeline(pipelinename string) *_indexSettings { + + s.v.FinalPipeline = &pipelinename + + return s +} + +func (s *_indexSettings) Format(format string) *_indexSettings { + + s.v.Format = format + + return s +} + +func (s *_indexSettings) GcDeletes(duration types.DurationVariant) *_indexSettings { + + s.v.GcDeletes = *duration.DurationCaster() + + return s +} + +func (s *_indexSettings) Hidden(hidden string) *_indexSettings { + + s.v.Hidden = hidden + + return s +} + +func (s *_indexSettings) Highlight(highlight types.SettingsHighlightVariant) *_indexSettings { + + s.v.Highlight = highlight.SettingsHighlightCaster() + + return s +} + +func (s *_indexSettings) 
Index(index types.IndexSettingsVariant) *_indexSettings { + + s.v.Index = index.IndexSettingsCaster() + + return s +} + +func (s *_indexSettings) IndexSettings(indexsettings map[string]json.RawMessage) *_indexSettings { + + s.v.IndexSettings = indexsettings + return s +} + +func (s *_indexSettings) AddIndexSetting(key string, value json.RawMessage) *_indexSettings { + + var tmp map[string]json.RawMessage + if s.v.IndexSettings == nil { + s.v.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = s.v.IndexSettings + } + + tmp[key] = value + + s.v.IndexSettings = tmp + return s +} + +// Configure indexing back pressure limits. +func (s *_indexSettings) IndexingPressure(indexingpressure types.IndicesIndexingPressureVariant) *_indexSettings { + + s.v.IndexingPressure = indexingpressure.IndicesIndexingPressureCaster() + + return s +} + +func (s *_indexSettings) IndexingSlowlog(indexingslowlog types.IndexingSlowlogSettingsVariant) *_indexSettings { + + s.v.IndexingSlowlog = indexingslowlog.IndexingSlowlogSettingsCaster() + + return s +} + +func (s *_indexSettings) Lifecycle(lifecycle types.IndexSettingsLifecycleVariant) *_indexSettings { + + s.v.Lifecycle = lifecycle.IndexSettingsLifecycleCaster() + + return s +} + +func (s *_indexSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *_indexSettings { + + s.v.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly + + return s +} + +// Enable or disable dynamic mapping for an index. 
+func (s *_indexSettings) Mapping(mapping types.MappingLimitSettingsVariant) *_indexSettings { + + s.v.Mapping = mapping.MappingLimitSettingsCaster() + + return s +} + +func (s *_indexSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *_indexSettings { + + s.v.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch + + return s +} + +func (s *_indexSettings) MaxInnerResultWindow(maxinnerresultwindow int) *_indexSettings { + + s.v.MaxInnerResultWindow = &maxinnerresultwindow + + return s +} + +func (s *_indexSettings) MaxNgramDiff(maxngramdiff int) *_indexSettings { + + s.v.MaxNgramDiff = &maxngramdiff + + return s +} + +func (s *_indexSettings) MaxRefreshListeners(maxrefreshlisteners int) *_indexSettings { + + s.v.MaxRefreshListeners = &maxrefreshlisteners + + return s +} + +func (s *_indexSettings) MaxRegexLength(maxregexlength int) *_indexSettings { + + s.v.MaxRegexLength = &maxregexlength + + return s +} + +func (s *_indexSettings) MaxRescoreWindow(maxrescorewindow int) *_indexSettings { + + s.v.MaxRescoreWindow = &maxrescorewindow + + return s +} + +func (s *_indexSettings) MaxResultWindow(maxresultwindow int) *_indexSettings { + + s.v.MaxResultWindow = &maxresultwindow + + return s +} + +func (s *_indexSettings) MaxScriptFields(maxscriptfields int) *_indexSettings { + + s.v.MaxScriptFields = &maxscriptfields + + return s +} + +func (s *_indexSettings) MaxShingleDiff(maxshinglediff int) *_indexSettings { + + s.v.MaxShingleDiff = &maxshinglediff + + return s +} + +func (s *_indexSettings) MaxSlicesPerScroll(maxslicesperscroll int) *_indexSettings { + + s.v.MaxSlicesPerScroll = &maxslicesperscroll + + return s +} + +func (s *_indexSettings) MaxTermsCount(maxtermscount int) *_indexSettings { + + s.v.MaxTermsCount = &maxtermscount + + return s +} + +func (s *_indexSettings) Merge(merge types.MergeVariant) *_indexSettings { + + s.v.Merge = merge.MergeCaster() + + return s +} + +func (s *_indexSettings) Mode(mode string) *_indexSettings { + + s.v.Mode = &mode 
+ + return s +} + +func (s *_indexSettings) NumberOfReplicas(numberofreplicas string) *_indexSettings { + + s.v.NumberOfReplicas = numberofreplicas + + return s +} + +func (s *_indexSettings) NumberOfRoutingShards(numberofroutingshards int) *_indexSettings { + + s.v.NumberOfRoutingShards = &numberofroutingshards + + return s +} + +func (s *_indexSettings) NumberOfShards(numberofshards string) *_indexSettings { + + s.v.NumberOfShards = numberofshards + + return s +} + +func (s *_indexSettings) Priority(priority string) *_indexSettings { + + s.v.Priority = priority + + return s +} + +func (s *_indexSettings) ProvidedName(name string) *_indexSettings { + + s.v.ProvidedName = &name + + return s +} + +func (s *_indexSettings) Queries(queries types.QueriesVariant) *_indexSettings { + + s.v.Queries = queries.QueriesCaster() + + return s +} + +func (s *_indexSettings) QueryString(querystring types.SettingsQueryStringVariant) *_indexSettings { + + s.v.QueryString = querystring.SettingsQueryStringCaster() + + return s +} + +func (s *_indexSettings) RefreshInterval(duration types.DurationVariant) *_indexSettings { + + s.v.RefreshInterval = *duration.DurationCaster() + + return s +} + +func (s *_indexSettings) Routing(routing types.IndexRoutingVariant) *_indexSettings { + + s.v.Routing = routing.IndexRoutingCaster() + + return s +} + +func (s *_indexSettings) RoutingPartitionSize(stringifiedinteger types.StringifiedintegerVariant) *_indexSettings { + + s.v.RoutingPartitionSize = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_indexSettings) RoutingPath(routingpaths ...string) *_indexSettings { + + s.v.RoutingPath = make([]string, len(routingpaths)) + s.v.RoutingPath = routingpaths + + return s +} + +func (s *_indexSettings) Search(search types.SettingsSearchVariant) *_indexSettings { + + s.v.Search = search.SettingsSearchCaster() + + return s +} + +func (s *_indexSettings) Settings(settings types.IndexSettingsVariant) *_indexSettings { + + 
s.v.Settings = settings.IndexSettingsCaster() + + return s +} + +// Configure custom similarity settings to customize how search results are +// scored. +func (s *_indexSettings) Similarity(similarity map[string]types.SettingsSimilarity) *_indexSettings { + + s.v.Similarity = similarity + return s +} + +func (s *_indexSettings) AddSimilarity(key string, value types.SettingsSimilarityVariant) *_indexSettings { + + var tmp map[string]types.SettingsSimilarity + if s.v.Similarity == nil { + s.v.Similarity = make(map[string]types.SettingsSimilarity) + } else { + tmp = s.v.Similarity + } + + tmp[key] = *value.SettingsSimilarityCaster() + + s.v.Similarity = tmp + return s +} + +func (s *_indexSettings) SoftDeletes(softdeletes types.SoftDeletesVariant) *_indexSettings { + + s.v.SoftDeletes = softdeletes.SoftDeletesCaster() + + return s +} + +func (s *_indexSettings) Sort(sort types.IndexSegmentSortVariant) *_indexSettings { + + s.v.Sort = sort.IndexSegmentSortCaster() + + return s +} + +// The store module allows you to control how index data is stored and accessed +// on disk. 
+func (s *_indexSettings) Store(store types.StorageVariant) *_indexSettings { + + s.v.Store = store.StorageCaster() + + return s +} + +func (s *_indexSettings) TimeSeries(timeseries types.IndexSettingsTimeSeriesVariant) *_indexSettings { + + s.v.TimeSeries = timeseries.IndexSettingsTimeSeriesCaster() + + return s +} + +func (s *_indexSettings) TopMetricsMaxSize(topmetricsmaxsize int) *_indexSettings { + + s.v.TopMetricsMaxSize = &topmetricsmaxsize + + return s +} + +func (s *_indexSettings) Translog(translog types.TranslogVariant) *_indexSettings { + + s.v.Translog = translog.TranslogCaster() + + return s +} + +func (s *_indexSettings) Uuid(uuid string) *_indexSettings { + + s.v.Uuid = &uuid + + return s +} + +func (s *_indexSettings) VerifiedBeforeClose(verifiedbeforeclose string) *_indexSettings { + + s.v.VerifiedBeforeClose = verifiedbeforeclose + + return s +} + +func (s *_indexSettings) Version(version types.IndexVersioningVariant) *_indexSettings { + + s.v.Version = version.IndexVersioningCaster() + + return s +} + +func (s *_indexSettings) IndexSettingsCaster() *types.IndexSettings { + return s.v +} diff --git a/typedapi/esdsl/indexsettingsanalysis.go b/typedapi/esdsl/indexsettingsanalysis.go new file mode 100644 index 0000000000..5075948487 --- /dev/null +++ b/typedapi/esdsl/indexsettingsanalysis.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexSettingsAnalysis struct { + v *types.IndexSettingsAnalysis +} + +func NewIndexSettingsAnalysis() *_indexSettingsAnalysis { + + return &_indexSettingsAnalysis{v: types.NewIndexSettingsAnalysis()} + +} + +func (s *_indexSettingsAnalysis) Analyzer(analyzer map[string]types.Analyzer) *_indexSettingsAnalysis { + + s.v.Analyzer = analyzer + return s +} + +func (s *_indexSettingsAnalysis) AddAnalyzer(key string, value types.AnalyzerVariant) *_indexSettingsAnalysis { + + var tmp map[string]types.Analyzer + if s.v.Analyzer == nil { + s.v.Analyzer = make(map[string]types.Analyzer) + } else { + tmp = s.v.Analyzer + } + + tmp[key] = *value.AnalyzerCaster() + + s.v.Analyzer = tmp + return s +} + +func (s *_indexSettingsAnalysis) CharFilter(charfilter map[string]types.CharFilter) *_indexSettingsAnalysis { + + s.v.CharFilter = charfilter + return s +} + +func (s *_indexSettingsAnalysis) AddCharFilter(key string, value types.CharFilterVariant) *_indexSettingsAnalysis { + + var tmp map[string]types.CharFilter + if s.v.CharFilter == nil { + s.v.CharFilter = make(map[string]types.CharFilter) + } else { + tmp = s.v.CharFilter + } + + tmp[key] = *value.CharFilterCaster() + + s.v.CharFilter = tmp + return s +} + +func (s *_indexSettingsAnalysis) Filter(filter map[string]types.TokenFilter) 
*_indexSettingsAnalysis { + + s.v.Filter = filter + return s +} + +func (s *_indexSettingsAnalysis) AddFilter(key string, value types.TokenFilterVariant) *_indexSettingsAnalysis { + + var tmp map[string]types.TokenFilter + if s.v.Filter == nil { + s.v.Filter = make(map[string]types.TokenFilter) + } else { + tmp = s.v.Filter + } + + tmp[key] = *value.TokenFilterCaster() + + s.v.Filter = tmp + return s +} + +func (s *_indexSettingsAnalysis) Normalizer(normalizer map[string]types.Normalizer) *_indexSettingsAnalysis { + + s.v.Normalizer = normalizer + return s +} + +func (s *_indexSettingsAnalysis) AddNormalizer(key string, value types.NormalizerVariant) *_indexSettingsAnalysis { + + var tmp map[string]types.Normalizer + if s.v.Normalizer == nil { + s.v.Normalizer = make(map[string]types.Normalizer) + } else { + tmp = s.v.Normalizer + } + + tmp[key] = *value.NormalizerCaster() + + s.v.Normalizer = tmp + return s +} + +func (s *_indexSettingsAnalysis) Tokenizer(tokenizer map[string]types.Tokenizer) *_indexSettingsAnalysis { + + s.v.Tokenizer = tokenizer + return s +} + +func (s *_indexSettingsAnalysis) AddTokenizer(key string, value types.TokenizerVariant) *_indexSettingsAnalysis { + + var tmp map[string]types.Tokenizer + if s.v.Tokenizer == nil { + s.v.Tokenizer = make(map[string]types.Tokenizer) + } else { + tmp = s.v.Tokenizer + } + + tmp[key] = *value.TokenizerCaster() + + s.v.Tokenizer = tmp + return s +} + +func (s *_indexSettingsAnalysis) IndexSettingsAnalysisCaster() *types.IndexSettingsAnalysis { + return s.v +} diff --git a/typedapi/esdsl/indexsettingslifecycle.go b/typedapi/esdsl/indexsettingslifecycle.go new file mode 100644 index 0000000000..1d28d23294 --- /dev/null +++ b/typedapi/esdsl/indexsettingslifecycle.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexSettingsLifecycle struct { + v *types.IndexSettingsLifecycle +} + +func NewIndexSettingsLifecycle() *_indexSettingsLifecycle { + + return &_indexSettingsLifecycle{v: types.NewIndexSettingsLifecycle()} + +} + +// Indicates whether or not the index has been rolled over. Automatically set to +// true when ILM completes the rollover action. +// You can explicitly set it to skip rollover. +func (s *_indexSettingsLifecycle) IndexingComplete(stringifiedboolean types.StringifiedbooleanVariant) *_indexSettingsLifecycle { + + s.v.IndexingComplete = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +// The name of the policy to use to manage the index. For information about how +// Elasticsearch applies policy changes, see Policy updates. +func (s *_indexSettingsLifecycle) Name(name string) *_indexSettingsLifecycle { + + s.v.Name = &name + + return s +} + +// If specified, this is the timestamp used to calculate the index age for its +// phase transitions. Use this setting +// if you create a new index that contains old data and want to use the original +// creation date to calculate the index +// age. 
Specified as a Unix epoch value in milliseconds. +func (s *_indexSettingsLifecycle) OriginationDate(originationdate int64) *_indexSettingsLifecycle { + + s.v.OriginationDate = &originationdate + + return s +} + +// Set to true to parse the origination date from the index name. This +// origination date is used to calculate the index age +// for its phase transitions. The index name must match the pattern +// ^.*-{date_format}-\\d+, where the date_format is +// yyyy.MM.dd and the trailing digits are optional. An index that was rolled +// over would normally match the full format, +// for example logs-2016.10.31-000002). If the index name doesn’t match the +// pattern, index creation fails. +func (s *_indexSettingsLifecycle) ParseOriginationDate(parseoriginationdate bool) *_indexSettingsLifecycle { + + s.v.ParseOriginationDate = &parseoriginationdate + + return s +} + +// Preference for the system that manages a data stream backing index +// (preferring ILM when both ILM and DLM are +// applicable for an index). +func (s *_indexSettingsLifecycle) PreferIlm(preferilm string) *_indexSettingsLifecycle { + + s.v.PreferIlm = preferilm + + return s +} + +// The index alias to update when the index rolls over. Specify when using a +// policy that contains a rollover action. +// When the index rolls over, the alias is updated to reflect that the index is +// no longer the write index. For more +// information about rolling indices, see Rollover. 
+func (s *_indexSettingsLifecycle) RolloverAlias(rolloveralias string) *_indexSettingsLifecycle { + + s.v.RolloverAlias = &rolloveralias + + return s +} + +func (s *_indexSettingsLifecycle) Step(step types.IndexSettingsLifecycleStepVariant) *_indexSettingsLifecycle { + + s.v.Step = step.IndexSettingsLifecycleStepCaster() + + return s +} + +func (s *_indexSettingsLifecycle) IndexSettingsLifecycleCaster() *types.IndexSettingsLifecycle { + return s.v +} diff --git a/typedapi/esdsl/indexsettingslifecyclestep.go b/typedapi/esdsl/indexsettingslifecyclestep.go new file mode 100644 index 0000000000..c54b73ad3b --- /dev/null +++ b/typedapi/esdsl/indexsettingslifecyclestep.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexSettingsLifecycleStep struct { + v *types.IndexSettingsLifecycleStep +} + +func NewIndexSettingsLifecycleStep() *_indexSettingsLifecycleStep { + + return &_indexSettingsLifecycleStep{v: types.NewIndexSettingsLifecycleStep()} + +} + +// Time to wait for the cluster to resolve allocation issues during an ILM +// shrink action. Must be greater than 1h (1 hour). +// See Shard allocation for shrink. +func (s *_indexSettingsLifecycleStep) WaitTimeThreshold(duration types.DurationVariant) *_indexSettingsLifecycleStep { + + s.v.WaitTimeThreshold = *duration.DurationCaster() + + return s +} + +func (s *_indexSettingsLifecycleStep) IndexSettingsLifecycleStepCaster() *types.IndexSettingsLifecycleStep { + return s.v +} diff --git a/typedapi/esdsl/indexsettingstimeseries.go b/typedapi/esdsl/indexsettingstimeseries.go new file mode 100644 index 0000000000..4863d8cc8d --- /dev/null +++ b/typedapi/esdsl/indexsettingstimeseries.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexSettingsTimeSeries struct { + v *types.IndexSettingsTimeSeries +} + +func NewIndexSettingsTimeSeries() *_indexSettingsTimeSeries { + + return &_indexSettingsTimeSeries{v: types.NewIndexSettingsTimeSeries()} + +} + +func (s *_indexSettingsTimeSeries) EndTime(datetime types.DateTimeVariant) *_indexSettingsTimeSeries { + + s.v.EndTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_indexSettingsTimeSeries) StartTime(datetime types.DateTimeVariant) *_indexSettingsTimeSeries { + + s.v.StartTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_indexSettingsTimeSeries) IndexSettingsTimeSeriesCaster() *types.IndexSettingsTimeSeries { + return s.v +} diff --git a/typedapi/esdsl/indexstate.go b/typedapi/esdsl/indexstate.go new file mode 100644 index 0000000000..f2f33c568d --- /dev/null +++ b/typedapi/esdsl/indexstate.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexState struct { + v *types.IndexState +} + +func NewIndexState() *_indexState { + + return &_indexState{v: types.NewIndexState()} + +} + +func (s *_indexState) Aliases(aliases map[string]types.Alias) *_indexState { + + s.v.Aliases = aliases + return s +} + +func (s *_indexState) AddAlias(key string, value types.AliasVariant) *_indexState { + + var tmp map[string]types.Alias + if s.v.Aliases == nil { + s.v.Aliases = make(map[string]types.Alias) + } else { + tmp = s.v.Aliases + } + + tmp[key] = *value.AliasCaster() + + s.v.Aliases = tmp + return s +} + +func (s *_indexState) DataStream(datastreamname string) *_indexState { + + s.v.DataStream = &datastreamname + + return s +} + +// Default settings, included when the request's `include_default` is `true`. +func (s *_indexState) Defaults(defaults types.IndexSettingsVariant) *_indexState { + + s.v.Defaults = defaults.IndexSettingsCaster() + + return s +} + +// Data stream lifecycle applicable if this is a data stream. 
+func (s *_indexState) Lifecycle(lifecycle types.DataStreamLifecycleVariant) *_indexState { + + s.v.Lifecycle = lifecycle.DataStreamLifecycleCaster() + + return s +} + +func (s *_indexState) Mappings(mappings types.TypeMappingVariant) *_indexState { + + s.v.Mappings = mappings.TypeMappingCaster() + + return s +} + +func (s *_indexState) Settings(settings types.IndexSettingsVariant) *_indexState { + + s.v.Settings = settings.IndexSettingsCaster() + + return s +} + +func (s *_indexState) IndexStateCaster() *types.IndexState { + return s.v +} diff --git a/typedapi/esdsl/indextemplate.go b/typedapi/esdsl/indextemplate.go new file mode 100644 index 0000000000..a2541c10e4 --- /dev/null +++ b/typedapi/esdsl/indextemplate.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexTemplate struct { + v *types.IndexTemplate +} + +func NewIndexTemplate() *_indexTemplate { + + return &_indexTemplate{v: types.NewIndexTemplate()} + +} + +func (s *_indexTemplate) AllowAutoCreate(allowautocreate bool) *_indexTemplate { + + s.v.AllowAutoCreate = &allowautocreate + + return s +} + +// An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. +func (s *_indexTemplate) ComposedOf(composedofs ...string) *_indexTemplate { + + for _, v := range composedofs { + + s.v.ComposedOf = append(s.v.ComposedOf, v) + + } + return s +} + +// If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. +func (s *_indexTemplate) DataStream(datastream types.IndexTemplateDataStreamConfigurationVariant) *_indexTemplate { + + s.v.DataStream = datastream.IndexTemplateDataStreamConfigurationCaster() + + return s +} + +// Marks this index template as deprecated. +// When creating or updating a non-deprecated index template that uses +// deprecated components, +// Elasticsearch will emit a deprecation warning. +func (s *_indexTemplate) Deprecated(deprecated bool) *_indexTemplate { + + s.v.Deprecated = &deprecated + + return s +} + +// A list of component template names that are allowed to be absent. +func (s *_indexTemplate) IgnoreMissingComponentTemplates(names ...string) *_indexTemplate { + + s.v.IgnoreMissingComponentTemplates = names + + return s +} + +// Name of the index template. 
+func (s *_indexTemplate) IndexPatterns(names ...string) *_indexTemplate { + + s.v.IndexPatterns = names + + return s +} + +// Optional user metadata about the index template. May have any contents. +// This map is not automatically generated by Elasticsearch. +func (s *_indexTemplate) Meta_(metadata types.MetadataVariant) *_indexTemplate { + + s.v.Meta_ = *metadata.MetadataCaster() + + return s +} + +// Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. +func (s *_indexTemplate) Priority(priority int64) *_indexTemplate { + + s.v.Priority = &priority + + return s +} + +// Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. +func (s *_indexTemplate) Template(template types.IndexTemplateSummaryVariant) *_indexTemplate { + + s.v.Template = template.IndexTemplateSummaryCaster() + + return s +} + +// Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. +func (s *_indexTemplate) Version(versionnumber int64) *_indexTemplate { + + s.v.Version = &versionnumber + + return s +} + +func (s *_indexTemplate) IndexTemplateCaster() *types.IndexTemplate { + return s.v +} diff --git a/typedapi/esdsl/indextemplatedatastreamconfiguration.go b/typedapi/esdsl/indextemplatedatastreamconfiguration.go new file mode 100644 index 0000000000..3dda6a5db5 --- /dev/null +++ b/typedapi/esdsl/indextemplatedatastreamconfiguration.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexTemplateDataStreamConfiguration struct { + v *types.IndexTemplateDataStreamConfiguration +} + +func NewIndexTemplateDataStreamConfiguration() *_indexTemplateDataStreamConfiguration { + + return &_indexTemplateDataStreamConfiguration{v: types.NewIndexTemplateDataStreamConfiguration()} + +} + +// If true, the data stream supports custom routing. +func (s *_indexTemplateDataStreamConfiguration) AllowCustomRouting(allowcustomrouting bool) *_indexTemplateDataStreamConfiguration { + + s.v.AllowCustomRouting = &allowcustomrouting + + return s +} + +// If true, the data stream is hidden. 
+func (s *_indexTemplateDataStreamConfiguration) Hidden(hidden bool) *_indexTemplateDataStreamConfiguration { + + s.v.Hidden = &hidden + + return s +} + +func (s *_indexTemplateDataStreamConfiguration) IndexTemplateDataStreamConfigurationCaster() *types.IndexTemplateDataStreamConfiguration { + return s.v +} diff --git a/typedapi/esdsl/indextemplatemapping.go b/typedapi/esdsl/indextemplatemapping.go new file mode 100644 index 0000000000..109f845382 --- /dev/null +++ b/typedapi/esdsl/indextemplatemapping.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexTemplateMapping struct { + v *types.IndexTemplateMapping +} + +func NewIndexTemplateMapping() *_indexTemplateMapping { + + return &_indexTemplateMapping{v: types.NewIndexTemplateMapping()} + +} + +// Aliases to add. +// If the index template includes a `data_stream` object, these are data stream +// aliases. +// Otherwise, these are index aliases. 
+// Data stream aliases ignore the `index_routing`, `routing`, and +// `search_routing` options. +func (s *_indexTemplateMapping) Aliases(aliases map[string]types.Alias) *_indexTemplateMapping { + + s.v.Aliases = aliases + return s +} + +func (s *_indexTemplateMapping) AddAlias(key string, value types.AliasVariant) *_indexTemplateMapping { + + var tmp map[string]types.Alias + if s.v.Aliases == nil { + s.v.Aliases = make(map[string]types.Alias) + } else { + tmp = s.v.Aliases + } + + tmp[key] = *value.AliasCaster() + + s.v.Aliases = tmp + return s +} + +func (s *_indexTemplateMapping) Lifecycle(lifecycle types.DataStreamLifecycleVariant) *_indexTemplateMapping { + + s.v.Lifecycle = lifecycle.DataStreamLifecycleCaster() + + return s +} + +// Mapping for fields in the index. +// If specified, this mapping can include field names, field data types, and +// mapping parameters. +func (s *_indexTemplateMapping) Mappings(mappings types.TypeMappingVariant) *_indexTemplateMapping { + + s.v.Mappings = mappings.TypeMappingCaster() + + return s +} + +// Configuration options for the index. +func (s *_indexTemplateMapping) Settings(settings types.IndexSettingsVariant) *_indexTemplateMapping { + + s.v.Settings = settings.IndexSettingsCaster() + + return s +} + +func (s *_indexTemplateMapping) IndexTemplateMappingCaster() *types.IndexTemplateMapping { + return s.v +} diff --git a/typedapi/esdsl/indextemplatesummary.go b/typedapi/esdsl/indextemplatesummary.go new file mode 100644 index 0000000000..5a22c34e46 --- /dev/null +++ b/typedapi/esdsl/indextemplatesummary.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexTemplateSummary struct { + v *types.IndexTemplateSummary +} + +func NewIndexTemplateSummary() *_indexTemplateSummary { + + return &_indexTemplateSummary{v: types.NewIndexTemplateSummary()} + +} + +// Aliases to add. +// If the index template includes a `data_stream` object, these are data stream +// aliases. +// Otherwise, these are index aliases. +// Data stream aliases ignore the `index_routing`, `routing`, and +// `search_routing` options. +func (s *_indexTemplateSummary) Aliases(aliases map[string]types.Alias) *_indexTemplateSummary { + + s.v.Aliases = aliases + return s +} + +func (s *_indexTemplateSummary) AddAlias(key string, value types.AliasVariant) *_indexTemplateSummary { + + var tmp map[string]types.Alias + if s.v.Aliases == nil { + s.v.Aliases = make(map[string]types.Alias) + } else { + tmp = s.v.Aliases + } + + tmp[key] = *value.AliasCaster() + + s.v.Aliases = tmp + return s +} + +func (s *_indexTemplateSummary) Lifecycle(lifecycle types.DataStreamLifecycleWithRolloverVariant) *_indexTemplateSummary { + + s.v.Lifecycle = lifecycle.DataStreamLifecycleWithRolloverCaster() + + return s +} + +// Mapping for fields in the index. +// If specified, this mapping can include field names, field data types, and +// mapping parameters. 
+func (s *_indexTemplateSummary) Mappings(mappings types.TypeMappingVariant) *_indexTemplateSummary { + + s.v.Mappings = mappings.TypeMappingCaster() + + return s +} + +// Configuration options for the index. +func (s *_indexTemplateSummary) Settings(settings types.IndexSettingsVariant) *_indexTemplateSummary { + + s.v.Settings = settings.IndexSettingsCaster() + + return s +} + +func (s *_indexTemplateSummary) IndexTemplateSummaryCaster() *types.IndexTemplateSummary { + return s.v +} diff --git a/typedapi/esdsl/indexversioning.go b/typedapi/esdsl/indexversioning.go new file mode 100644 index 0000000000..35e26340bf --- /dev/null +++ b/typedapi/esdsl/indexversioning.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indexVersioning struct { + v *types.IndexVersioning +} + +func NewIndexVersioning() *_indexVersioning { + + return &_indexVersioning{v: types.NewIndexVersioning()} + +} + +func (s *_indexVersioning) Created(versionstring string) *_indexVersioning { + + s.v.Created = &versionstring + + return s +} + +func (s *_indexVersioning) CreatedString(createdstring string) *_indexVersioning { + + s.v.CreatedString = &createdstring + + return s +} + +func (s *_indexVersioning) IndexVersioningCaster() *types.IndexVersioning { + return s.v +} diff --git a/typedapi/esdsl/indices.go b/typedapi/esdsl/indices.go new file mode 100644 index 0000000000..9498181eb7 --- /dev/null +++ b/typedapi/esdsl/indices.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _indices struct { + v types.Indices +} + +func NewIndices() *_indices { + return &_indices{v: []string{}} +} + +func (u *_indices) IndicesCaster() *types.Indices { + return &u.v +} diff --git a/typedapi/esdsl/indicesaction.go b/typedapi/esdsl/indicesaction.go new file mode 100644 index 0000000000..098d6a648b --- /dev/null +++ b/typedapi/esdsl/indicesaction.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _indicesAction struct { + v *types.IndicesAction +} + +func NewIndicesAction() *_indicesAction { + return &_indicesAction{v: types.NewIndicesAction()} +} + +// Adds a data stream or index to an alias. +// If the alias doesn’t exist, the `add` action creates it. 
+func (s *_indicesAction) Add(add types.AddActionVariant) *_indicesAction { + + s.v.Add = add.AddActionCaster() + + return s +} + +// AdditionalIndicesActionProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_indicesAction) AdditionalIndicesActionProperty(key string, value json.RawMessage) *_indicesAction { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalIndicesActionProperty = tmp + return s +} + +// Removes a data stream or index from an alias. +func (s *_indicesAction) Remove(remove types.RemoveActionVariant) *_indicesAction { + + s.v.Remove = remove.RemoveActionCaster() + + return s +} + +// Deletes an index. +// You cannot use this action on aliases or data streams. +func (s *_indicesAction) RemoveIndex(removeindex types.RemoveIndexActionVariant) *_indicesAction { + + s.v.RemoveIndex = removeindex.RemoveIndexActionCaster() + + return s +} + +func (s *_indicesAction) IndicesActionCaster() *types.IndicesAction { + return s.v +} diff --git a/typedapi/esdsl/indicesindexingpressure.go b/typedapi/esdsl/indicesindexingpressure.go new file mode 100644 index 0000000000..d827c847bb --- /dev/null +++ b/typedapi/esdsl/indicesindexingpressure.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indicesIndexingPressure struct { + v *types.IndicesIndexingPressure +} + +func NewIndicesIndexingPressure(memory types.IndicesIndexingPressureMemoryVariant) *_indicesIndexingPressure { + + tmp := &_indicesIndexingPressure{v: types.NewIndicesIndexingPressure()} + + tmp.Memory(memory) + + return tmp + +} + +func (s *_indicesIndexingPressure) Memory(memory types.IndicesIndexingPressureMemoryVariant) *_indicesIndexingPressure { + + s.v.Memory = *memory.IndicesIndexingPressureMemoryCaster() + + return s +} + +func (s *_indicesIndexingPressure) IndicesIndexingPressureCaster() *types.IndicesIndexingPressure { + return s.v +} diff --git a/typedapi/esdsl/indicesindexingpressurememory.go b/typedapi/esdsl/indicesindexingpressurememory.go new file mode 100644 index 0000000000..7e996c6428 --- /dev/null +++ b/typedapi/esdsl/indicesindexingpressurememory.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indicesIndexingPressureMemory struct { + v *types.IndicesIndexingPressureMemory +} + +func NewIndicesIndexingPressureMemory() *_indicesIndexingPressureMemory { + + return &_indicesIndexingPressureMemory{v: types.NewIndicesIndexingPressureMemory()} + +} + +// Number of outstanding bytes that may be consumed by indexing requests. When +// this limit is reached or exceeded, +// the node will reject new coordinating and primary operations. When replica +// operations consume 1.5x this limit, +// the node will reject new replica operations. Defaults to 10% of the heap. +func (s *_indicesIndexingPressureMemory) Limit(limit int) *_indicesIndexingPressureMemory { + + s.v.Limit = &limit + + return s +} + +func (s *_indicesIndexingPressureMemory) IndicesIndexingPressureMemoryCaster() *types.IndicesIndexingPressureMemory { + return s.v +} diff --git a/typedapi/esdsl/indicesmodifyaction.go b/typedapi/esdsl/indicesmodifyaction.go new file mode 100644 index 0000000000..a7f2e7043a --- /dev/null +++ b/typedapi/esdsl/indicesmodifyaction.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _indicesModifyAction struct { + v *types.IndicesModifyAction +} + +func NewIndicesModifyAction() *_indicesModifyAction { + return &_indicesModifyAction{v: types.NewIndicesModifyAction()} +} + +// Adds an existing index as a backing index for a data stream. +// The index is hidden as part of this operation. +// WARNING: Adding indices with the `add_backing_index` action can potentially +// result in improper data stream behavior. +// This should be considered an expert level API. +func (s *_indicesModifyAction) AddBackingIndex(addbackingindex types.IndexAndDataStreamActionVariant) *_indicesModifyAction { + + s.v.AddBackingIndex = addbackingindex.IndexAndDataStreamActionCaster() + + return s +} + +// AdditionalIndicesModifyActionProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_indicesModifyAction) AdditionalIndicesModifyActionProperty(key string, value json.RawMessage) *_indicesModifyAction { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalIndicesModifyActionProperty = tmp + return s +} + +// Removes a backing index from a data stream. +// The index is unhidden as part of this operation. +// A data stream’s write index cannot be removed. 
+func (s *_indicesModifyAction) RemoveBackingIndex(removebackingindex types.IndexAndDataStreamActionVariant) *_indicesModifyAction { + + s.v.RemoveBackingIndex = removebackingindex.IndexAndDataStreamActionCaster() + + return s +} + +func (s *_indicesModifyAction) IndicesModifyActionCaster() *types.IndicesModifyAction { + return s.v +} diff --git a/typedapi/esdsl/indicesoptions.go b/typedapi/esdsl/indicesoptions.go new file mode 100644 index 0000000000..4e30506fad --- /dev/null +++ b/typedapi/esdsl/indicesoptions.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" +) + +type _indicesOptions struct { + v *types.IndicesOptions +} + +func NewIndicesOptions() *_indicesOptions { + + return &_indicesOptions{v: types.NewIndicesOptions()} + +} + +// If false, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only +// missing or closed indices. This behavior applies even if the request targets +// other open indices. For example, +// a request targeting `foo*,bar*` returns an error if an index starts with +// `foo` but no index starts with `bar`. +func (s *_indicesOptions) AllowNoIndices(allownoindices bool) *_indicesOptions { + + s.v.AllowNoIndices = &allownoindices + + return s +} + +// Type of index that wildcard patterns can match. If the request can target +// data streams, this argument +// determines whether wildcard expressions match hidden data streams. Supports +// comma-separated values, +// such as `open,hidden`. +func (s *_indicesOptions) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *_indicesOptions { + + s.v.ExpandWildcards = expandwildcards + + return s +} + +// If true, concrete, expanded or aliased indices are ignored when frozen. +func (s *_indicesOptions) IgnoreThrottled(ignorethrottled bool) *_indicesOptions { + + s.v.IgnoreThrottled = &ignorethrottled + + return s +} + +// If true, missing or closed indices are not included in the response. 
+func (s *_indicesOptions) IgnoreUnavailable(ignoreunavailable bool) *_indicesOptions { + + s.v.IgnoreUnavailable = &ignoreunavailable + + return s +} + +func (s *_indicesOptions) IndicesOptionsCaster() *types.IndicesOptions { + return s.v +} diff --git a/typedapi/esdsl/indicesprivileges.go b/typedapi/esdsl/indicesprivileges.go new file mode 100644 index 0000000000..39a6e3c407 --- /dev/null +++ b/typedapi/esdsl/indicesprivileges.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +type _indicesPrivileges struct { + v *types.IndicesPrivileges +} + +func NewIndicesPrivileges() *_indicesPrivileges { + + return &_indicesPrivileges{v: types.NewIndicesPrivileges()} + +} + +// Set to `true` if using wildcard or regular expressions for patterns that +// cover restricted indices. Implicitly, restricted indices have limited +// privileges that can cause pattern tests to fail. 
If restricted indices are +// explicitly included in the `names` list, Elasticsearch checks privileges +// against these indices regardless of the value set for +// `allow_restricted_indices`. +func (s *_indicesPrivileges) AllowRestrictedIndices(allowrestrictedindices bool) *_indicesPrivileges { + + s.v.AllowRestrictedIndices = &allowrestrictedindices + + return s +} + +// The document fields that the owners of the role have read access to. +func (s *_indicesPrivileges) FieldSecurity(fieldsecurity types.FieldSecurityVariant) *_indicesPrivileges { + + s.v.FieldSecurity = fieldsecurity.FieldSecurityCaster() + + return s +} + +// A list of indices (or index name patterns) to which the permissions in this +// entry apply. +func (s *_indicesPrivileges) Names(names ...string) *_indicesPrivileges { + + s.v.Names = make([]string, len(names)) + s.v.Names = names + + return s +} + +// The index level privileges that owners of the role have on the specified +// indices. +func (s *_indicesPrivileges) Privileges(privileges ...indexprivilege.IndexPrivilege) *_indicesPrivileges { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +// A search query that defines the documents the owners of the role have access +// to. A document within the specified indices must match this query for it to +// be accessible by the owners of the role. +func (s *_indicesPrivileges) Query(indicesprivilegesquery types.IndicesPrivilegesQueryVariant) *_indicesPrivileges { + + s.v.Query = *indicesprivilegesquery.IndicesPrivilegesQueryCaster() + + return s +} + +func (s *_indicesPrivileges) IndicesPrivilegesCaster() *types.IndicesPrivileges { + return s.v +} diff --git a/typedapi/esdsl/indicesprivilegesquery.go b/typedapi/esdsl/indicesprivilegesquery.go new file mode 100644 index 0000000000..29ebd1d600 --- /dev/null +++ b/typedapi/esdsl/indicesprivilegesquery.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _indicesPrivilegesQuery struct { + v types.IndicesPrivilegesQuery +} + +func NewIndicesPrivilegesQuery() *_indicesPrivilegesQuery { + return &_indicesPrivilegesQuery{v: nil} +} + +func (u *_indicesPrivilegesQuery) String(string string) *_indicesPrivilegesQuery { + + u.v = &string + + return u +} + +func (u *_indicesPrivilegesQuery) Query(query types.QueryVariant) *_indicesPrivilegesQuery { + + u.v = &query + + return u +} + +// Interface implementation for Query in IndicesPrivilegesQuery union +func (u *_query) IndicesPrivilegesQueryCaster() *types.IndicesPrivilegesQuery { + t := types.IndicesPrivilegesQuery(u.v) + return &t +} + +func (u *_indicesPrivilegesQuery) RoleTemplateQuery(roletemplatequery types.RoleTemplateQueryVariant) *_indicesPrivilegesQuery { + + u.v = &roletemplatequery + + return u +} + +// Interface implementation for RoleTemplateQuery in IndicesPrivilegesQuery union +func (u *_roleTemplateQuery) IndicesPrivilegesQueryCaster() *types.IndicesPrivilegesQuery { + t := types.IndicesPrivilegesQuery(u.v) + return &t +} + +func (u *_indicesPrivilegesQuery) IndicesPrivilegesQueryCaster() *types.IndicesPrivilegesQuery { + return &u.v +} diff --git a/typedapi/esdsl/indonesiananalyzer.go b/typedapi/esdsl/indonesiananalyzer.go new file mode 100644 index 0000000000..e18d696a62 --- /dev/null +++ b/typedapi/esdsl/indonesiananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _indonesianAnalyzer struct { + v *types.IndonesianAnalyzer +} + +func NewIndonesianAnalyzer() *_indonesianAnalyzer { + + return &_indonesianAnalyzer{v: types.NewIndonesianAnalyzer()} + +} + +func (s *_indonesianAnalyzer) StemExclusion(stemexclusions ...string) *_indonesianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_indonesianAnalyzer) Stopwords(stopwords ...string) *_indonesianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_indonesianAnalyzer) StopwordsPath(stopwordspath string) *_indonesianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_indonesianAnalyzer) IndonesianAnalyzerCaster() *types.IndonesianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/inferenceaggregation.go b/typedapi/esdsl/inferenceaggregation.go new file mode 100644 index 0000000000..c73274d393 --- /dev/null +++ b/typedapi/esdsl/inferenceaggregation.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _inferenceAggregation struct { + v *types.InferenceAggregation +} + +// A parent pipeline aggregation which loads a pre-trained model and performs +// inference on the collated result fields from the parent bucket aggregation. +func NewInferenceAggregation() *_inferenceAggregation { + + return &_inferenceAggregation{v: types.NewInferenceAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_inferenceAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_inferenceAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_inferenceAggregation) Format(format string) *_inferenceAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_inferenceAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_inferenceAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// Contains the inference type and its options. +func (s *_inferenceAggregation) InferenceConfig(inferenceconfig types.InferenceConfigContainerVariant) *_inferenceAggregation { + + s.v.InferenceConfig = inferenceconfig.InferenceConfigContainerCaster() + + return s +} + +// The ID or alias for the trained model. +func (s *_inferenceAggregation) ModelId(name string) *_inferenceAggregation { + + s.v.ModelId = name + + return s +} + +func (s *_inferenceAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Inference = s.v + + return container +} + +func (s *_inferenceAggregation) InferenceAggregationCaster() *types.InferenceAggregation { + return s.v +} diff --git a/typedapi/esdsl/inferencechunkingsettings.go b/typedapi/esdsl/inferencechunkingsettings.go new file mode 100644 index 0000000000..ce23e75525 --- /dev/null +++ b/typedapi/esdsl/inferencechunkingsettings.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceChunkingSettings struct { + v *types.InferenceChunkingSettings +} + +func NewInferenceChunkingSettings(service string) *_inferenceChunkingSettings { + + tmp := &_inferenceChunkingSettings{v: types.NewInferenceChunkingSettings()} + + tmp.Service(service) + + return tmp + +} + +// Chunking configuration object +func (s *_inferenceChunkingSettings) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *_inferenceChunkingSettings { + + s.v.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return s +} + +// The maximum size of a chunk in words. +// This value cannot be higher than `300` or lower than `20` (for `sentence` +// strategy) or `10` (for `word` strategy). +func (s *_inferenceChunkingSettings) MaxChunkSize(maxchunksize int) *_inferenceChunkingSettings { + + s.v.MaxChunkSize = &maxchunksize + + return s +} + +// The number of overlapping words for chunks. +// It is applicable only to a `word` chunking strategy. +// This value cannot be higher than half the `max_chunk_size` value. +func (s *_inferenceChunkingSettings) Overlap(overlap int) *_inferenceChunkingSettings { + + s.v.Overlap = &overlap + + return s +} + +// The number of overlapping sentences for chunks. +// It is applicable only for a `sentence` chunking strategy. +// It can be either `1` or `0`. 
+func (s *_inferenceChunkingSettings) SentenceOverlap(sentenceoverlap int) *_inferenceChunkingSettings { + + s.v.SentenceOverlap = &sentenceoverlap + + return s +} + +// The service type +func (s *_inferenceChunkingSettings) Service(service string) *_inferenceChunkingSettings { + + s.v.Service = service + + return s +} + +// Settings specific to the service +func (s *_inferenceChunkingSettings) ServiceSettings(servicesettings json.RawMessage) *_inferenceChunkingSettings { + + s.v.ServiceSettings = servicesettings + + return s +} + +// The chunking strategy: `sentence` or `word`. +func (s *_inferenceChunkingSettings) Strategy(strategy string) *_inferenceChunkingSettings { + + s.v.Strategy = &strategy + + return s +} + +// Task settings specific to the service and task type +func (s *_inferenceChunkingSettings) TaskSettings(tasksettings json.RawMessage) *_inferenceChunkingSettings { + + s.v.TaskSettings = tasksettings + + return s +} + +func (s *_inferenceChunkingSettings) InferenceChunkingSettingsCaster() *types.InferenceChunkingSettings { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfig.go b/typedapi/esdsl/inferenceconfig.go new file mode 100644 index 0000000000..5f9ac13a1e --- /dev/null +++ b/typedapi/esdsl/inferenceconfig.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceConfig struct { + v *types.InferenceConfig +} + +func NewInferenceConfig() *_inferenceConfig { + return &_inferenceConfig{v: types.NewInferenceConfig()} +} + +// AdditionalInferenceConfigProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_inferenceConfig) AdditionalInferenceConfigProperty(key string, value json.RawMessage) *_inferenceConfig { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalInferenceConfigProperty = tmp + return s +} + +// Classification configuration for inference. +func (s *_inferenceConfig) Classification(classification types.InferenceConfigClassificationVariant) *_inferenceConfig { + + s.v.Classification = classification.InferenceConfigClassificationCaster() + + return s +} + +// Regression configuration for inference. +func (s *_inferenceConfig) Regression(regression types.InferenceConfigRegressionVariant) *_inferenceConfig { + + s.v.Regression = regression.InferenceConfigRegressionCaster() + + return s +} + +func (s *_inferenceConfig) InferenceConfigCaster() *types.InferenceConfig { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfigclassification.go b/typedapi/esdsl/inferenceconfigclassification.go new file mode 100644 index 0000000000..9ff4ba20e5 --- /dev/null +++ b/typedapi/esdsl/inferenceconfigclassification.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _inferenceConfigClassification struct { + v *types.InferenceConfigClassification +} + +// Classification configuration for inference. +func NewInferenceConfigClassification() *_inferenceConfigClassification { + + return &_inferenceConfigClassification{v: types.NewInferenceConfigClassification()} + +} + +// Specifies the number of top class predictions to return. +func (s *_inferenceConfigClassification) NumTopClasses(numtopclasses int) *_inferenceConfigClassification { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// Specifies the maximum number of feature importance values per document. +func (s *_inferenceConfigClassification) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_inferenceConfigClassification { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// Specifies the type of the predicted field to write. +// Valid values are: `string`, `number`, `boolean`. 
+func (s *_inferenceConfigClassification) PredictionFieldType(predictionfieldtype string) *_inferenceConfigClassification { + + s.v.PredictionFieldType = &predictionfieldtype + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. +func (s *_inferenceConfigClassification) ResultsField(field string) *_inferenceConfigClassification { + + s.v.ResultsField = &field + + return s +} + +// Specifies the field to which the top classes are written. +func (s *_inferenceConfigClassification) TopClassesResultsField(field string) *_inferenceConfigClassification { + + s.v.TopClassesResultsField = &field + + return s +} + +func (s *_inferenceConfigClassification) InferenceConfigCaster() *types.InferenceConfig { + container := types.NewInferenceConfig() + + container.Classification = s.v + + return container +} + +func (s *_inferenceConfigClassification) InferenceConfigClassificationCaster() *types.InferenceConfigClassification { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfigcontainer.go b/typedapi/esdsl/inferenceconfigcontainer.go new file mode 100644 index 0000000000..08874b27b8 --- /dev/null +++ b/typedapi/esdsl/inferenceconfigcontainer.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceConfigContainer struct { + v *types.InferenceConfigContainer +} + +func NewInferenceConfigContainer() *_inferenceConfigContainer { + return &_inferenceConfigContainer{v: types.NewInferenceConfigContainer()} +} + +// AdditionalInferenceConfigContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_inferenceConfigContainer) AdditionalInferenceConfigContainerProperty(key string, value json.RawMessage) *_inferenceConfigContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalInferenceConfigContainerProperty = tmp + return s +} + +// Classification configuration for inference. +func (s *_inferenceConfigContainer) Classification(classification types.ClassificationInferenceOptionsVariant) *_inferenceConfigContainer { + + s.v.Classification = classification.ClassificationInferenceOptionsCaster() + + return s +} + +// Regression configuration for inference. +func (s *_inferenceConfigContainer) Regression(regression types.RegressionInferenceOptionsVariant) *_inferenceConfigContainer { + + s.v.Regression = regression.RegressionInferenceOptionsCaster() + + return s +} + +func (s *_inferenceConfigContainer) InferenceConfigContainerCaster() *types.InferenceConfigContainer { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfigcreatecontainer.go b/typedapi/esdsl/inferenceconfigcreatecontainer.go new file mode 100644 index 0000000000..9c5c67cabe --- /dev/null +++ b/typedapi/esdsl/inferenceconfigcreatecontainer.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceConfigCreateContainer struct { + v *types.InferenceConfigCreateContainer +} + +func NewInferenceConfigCreateContainer() *_inferenceConfigCreateContainer { + return &_inferenceConfigCreateContainer{v: types.NewInferenceConfigCreateContainer()} +} + +// AdditionalInferenceConfigCreateContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_inferenceConfigCreateContainer) AdditionalInferenceConfigCreateContainerProperty(key string, value json.RawMessage) *_inferenceConfigCreateContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalInferenceConfigCreateContainerProperty = tmp + return s +} + +// Classification configuration for inference. 
+func (s *_inferenceConfigCreateContainer) Classification(classification types.ClassificationInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.Classification = classification.ClassificationInferenceOptionsCaster() + + return s +} + +// Fill mask configuration for inference. +func (s *_inferenceConfigCreateContainer) FillMask(fillmask types.FillMaskInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.FillMask = fillmask.FillMaskInferenceOptionsCaster() + + return s +} + +// Named entity recognition configuration for inference. +func (s *_inferenceConfigCreateContainer) Ner(ner types.NerInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.Ner = ner.NerInferenceOptionsCaster() + + return s +} + +// Pass through configuration for inference. +func (s *_inferenceConfigCreateContainer) PassThrough(passthrough types.PassThroughInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.PassThrough = passthrough.PassThroughInferenceOptionsCaster() + + return s +} + +// Question answering configuration for inference. +func (s *_inferenceConfigCreateContainer) QuestionAnswering(questionanswering types.QuestionAnsweringInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.QuestionAnswering = questionanswering.QuestionAnsweringInferenceOptionsCaster() + + return s +} + +// Regression configuration for inference. +func (s *_inferenceConfigCreateContainer) Regression(regression types.RegressionInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.Regression = regression.RegressionInferenceOptionsCaster() + + return s +} + +// Text classification configuration for inference. +func (s *_inferenceConfigCreateContainer) TextClassification(textclassification types.TextClassificationInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.TextClassification = textclassification.TextClassificationInferenceOptionsCaster() + + return s +} + +// Text embedding configuration for inference. 
+func (s *_inferenceConfigCreateContainer) TextEmbedding(textembedding types.TextEmbeddingInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.TextEmbedding = textembedding.TextEmbeddingInferenceOptionsCaster() + + return s +} + +// Text expansion configuration for inference. +func (s *_inferenceConfigCreateContainer) TextExpansion(textexpansion types.TextExpansionInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.TextExpansion = textexpansion.TextExpansionInferenceOptionsCaster() + + return s +} + +// Zeroshot classification configuration for inference. +func (s *_inferenceConfigCreateContainer) ZeroShotClassification(zeroshotclassification types.ZeroShotClassificationInferenceOptionsVariant) *_inferenceConfigCreateContainer { + + s.v.ZeroShotClassification = zeroshotclassification.ZeroShotClassificationInferenceOptionsCaster() + + return s +} + +func (s *_inferenceConfigCreateContainer) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfigregression.go b/typedapi/esdsl/inferenceconfigregression.go new file mode 100644 index 0000000000..39154a7f6c --- /dev/null +++ b/typedapi/esdsl/inferenceconfigregression.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _inferenceConfigRegression struct { + v *types.InferenceConfigRegression +} + +// Regression configuration for inference. +func NewInferenceConfigRegression() *_inferenceConfigRegression { + + return &_inferenceConfigRegression{v: types.NewInferenceConfigRegression()} + +} + +// Specifies the maximum number of feature importance values per document. +func (s *_inferenceConfigRegression) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_inferenceConfigRegression { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. +func (s *_inferenceConfigRegression) ResultsField(field string) *_inferenceConfigRegression { + + s.v.ResultsField = &field + + return s +} + +func (s *_inferenceConfigRegression) InferenceConfigCaster() *types.InferenceConfig { + container := types.NewInferenceConfig() + + container.Regression = s.v + + return container +} + +func (s *_inferenceConfigRegression) InferenceConfigRegressionCaster() *types.InferenceConfigRegression { + return s.v +} diff --git a/typedapi/esdsl/inferenceconfigupdatecontainer.go b/typedapi/esdsl/inferenceconfigupdatecontainer.go new file mode 100644 index 0000000000..e1c2e7bfef --- /dev/null +++ b/typedapi/esdsl/inferenceconfigupdatecontainer.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceConfigUpdateContainer struct { + v *types.InferenceConfigUpdateContainer +} + +func NewInferenceConfigUpdateContainer() *_inferenceConfigUpdateContainer { + return &_inferenceConfigUpdateContainer{v: types.NewInferenceConfigUpdateContainer()} +} + +// AdditionalInferenceConfigUpdateContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_inferenceConfigUpdateContainer) AdditionalInferenceConfigUpdateContainerProperty(key string, value json.RawMessage) *_inferenceConfigUpdateContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalInferenceConfigUpdateContainerProperty = tmp + return s +} + +// Classification configuration for inference. +func (s *_inferenceConfigUpdateContainer) Classification(classification types.ClassificationInferenceOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.Classification = classification.ClassificationInferenceOptionsCaster() + + return s +} + +// Fill mask configuration for inference. 
+func (s *_inferenceConfigUpdateContainer) FillMask(fillmask types.FillMaskInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.FillMask = fillmask.FillMaskInferenceUpdateOptionsCaster() + + return s +} + +// Named entity recognition configuration for inference. +func (s *_inferenceConfigUpdateContainer) Ner(ner types.NerInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.Ner = ner.NerInferenceUpdateOptionsCaster() + + return s +} + +// Pass through configuration for inference. +func (s *_inferenceConfigUpdateContainer) PassThrough(passthrough types.PassThroughInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.PassThrough = passthrough.PassThroughInferenceUpdateOptionsCaster() + + return s +} + +// Question answering configuration for inference +func (s *_inferenceConfigUpdateContainer) QuestionAnswering(questionanswering types.QuestionAnsweringInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.QuestionAnswering = questionanswering.QuestionAnsweringInferenceUpdateOptionsCaster() + + return s +} + +// Regression configuration for inference. +func (s *_inferenceConfigUpdateContainer) Regression(regression types.RegressionInferenceOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.Regression = regression.RegressionInferenceOptionsCaster() + + return s +} + +// Text classification configuration for inference. +func (s *_inferenceConfigUpdateContainer) TextClassification(textclassification types.TextClassificationInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.TextClassification = textclassification.TextClassificationInferenceUpdateOptionsCaster() + + return s +} + +// Text embedding configuration for inference. 
+func (s *_inferenceConfigUpdateContainer) TextEmbedding(textembedding types.TextEmbeddingInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.TextEmbedding = textembedding.TextEmbeddingInferenceUpdateOptionsCaster() + + return s +} + +// Text expansion configuration for inference. +func (s *_inferenceConfigUpdateContainer) TextExpansion(textexpansion types.TextExpansionInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.TextExpansion = textexpansion.TextExpansionInferenceUpdateOptionsCaster() + + return s +} + +// Zeroshot classification configuration for inference. +func (s *_inferenceConfigUpdateContainer) ZeroShotClassification(zeroshotclassification types.ZeroShotClassificationInferenceUpdateOptionsVariant) *_inferenceConfigUpdateContainer { + + s.v.ZeroShotClassification = zeroshotclassification.ZeroShotClassificationInferenceUpdateOptionsCaster() + + return s +} + +func (s *_inferenceConfigUpdateContainer) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + return s.v +} diff --git a/typedapi/esdsl/inferenceendpoint.go b/typedapi/esdsl/inferenceendpoint.go new file mode 100644 index 0000000000..23e71b5d0b --- /dev/null +++ b/typedapi/esdsl/inferenceendpoint.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceEndpoint struct { + v *types.InferenceEndpoint +} + +func NewInferenceEndpoint(service string) *_inferenceEndpoint { + + tmp := &_inferenceEndpoint{v: types.NewInferenceEndpoint()} + + tmp.Service(service) + + return tmp + +} + +// Chunking configuration object +func (s *_inferenceEndpoint) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *_inferenceEndpoint { + + s.v.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return s +} + +// The service type +func (s *_inferenceEndpoint) Service(service string) *_inferenceEndpoint { + + s.v.Service = service + + return s +} + +// Settings specific to the service +func (s *_inferenceEndpoint) ServiceSettings(servicesettings json.RawMessage) *_inferenceEndpoint { + + s.v.ServiceSettings = servicesettings + + return s +} + +// Task settings specific to the service and task type +func (s *_inferenceEndpoint) TaskSettings(tasksettings json.RawMessage) *_inferenceEndpoint { + + s.v.TaskSettings = tasksettings + + return s +} + +func (s *_inferenceEndpoint) InferenceEndpointCaster() *types.InferenceEndpoint { + return s.v +} diff --git a/typedapi/esdsl/inferenceprocessor.go b/typedapi/esdsl/inferenceprocessor.go new file mode 100644 index 0000000000..522be64594 --- /dev/null +++ b/typedapi/esdsl/inferenceprocessor.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _inferenceProcessor struct { + v *types.InferenceProcessor +} + +// Uses a pre-trained data frame analytics model or a model deployed for natural +// language processing tasks to infer against the data that is being ingested in +// the pipeline. +func NewInferenceProcessor() *_inferenceProcessor { + + return &_inferenceProcessor{v: types.NewInferenceProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_inferenceProcessor) Description(description string) *_inferenceProcessor { + + s.v.Description = &description + + return s +} + +// Maps the document field names to the known field names of the model. +// This mapping takes precedence over any default mappings provided in the model +// configuration. 
+func (s *_inferenceProcessor) FieldMap(fieldmap map[string]json.RawMessage) *_inferenceProcessor { + + s.v.FieldMap = fieldmap + return s +} + +func (s *_inferenceProcessor) AddFieldMap(key string, value json.RawMessage) *_inferenceProcessor { + + var tmp map[string]json.RawMessage + if s.v.FieldMap == nil { + s.v.FieldMap = make(map[string]json.RawMessage) + } else { + tmp = s.v.FieldMap + } + + tmp[key] = value + + s.v.FieldMap = tmp + return s +} + +// Conditionally execute the processor. +func (s *_inferenceProcessor) If(if_ types.ScriptVariant) *_inferenceProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_inferenceProcessor) IgnoreFailure(ignorefailure bool) *_inferenceProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Contains the inference type and its options. +func (s *_inferenceProcessor) InferenceConfig(inferenceconfig types.InferenceConfigVariant) *_inferenceProcessor { + + s.v.InferenceConfig = inferenceconfig.InferenceConfigCaster() + + return s +} + +// The ID or alias for the trained model, or the ID of the deployment. +func (s *_inferenceProcessor) ModelId(id string) *_inferenceProcessor { + + s.v.ModelId = id + + return s +} + +// Handle failures for the processor. +func (s *_inferenceProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_inferenceProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_inferenceProcessor) Tag(tag string) *_inferenceProcessor { + + s.v.Tag = &tag + + return s +} + +// Field added to incoming documents to contain results objects. 
+func (s *_inferenceProcessor) TargetField(field string) *_inferenceProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_inferenceProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Inference = s.v + + return container +} + +func (s *_inferenceProcessor) InferenceProcessorCaster() *types.InferenceProcessor { + return s.v +} diff --git a/typedapi/esdsl/ingestpipeline.go b/typedapi/esdsl/ingestpipeline.go new file mode 100644 index 0000000000..94701fdda8 --- /dev/null +++ b/typedapi/esdsl/ingestpipeline.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ingestPipeline struct { + v *types.IngestPipeline +} + +func NewIngestPipeline() *_ingestPipeline { + + return &_ingestPipeline{v: types.NewIngestPipeline()} + +} + +// Marks this ingest pipeline as deprecated. 
+// When a deprecated ingest pipeline is referenced as the default or final +// pipeline when creating or updating a non-deprecated index template, +// Elasticsearch will emit a deprecation warning. +func (s *_ingestPipeline) Deprecated(deprecated bool) *_ingestPipeline { + + s.v.Deprecated = &deprecated + + return s +} + +// Description of the ingest pipeline. +func (s *_ingestPipeline) Description(description string) *_ingestPipeline { + + s.v.Description = &description + + return s +} + +// Arbitrary metadata about the ingest pipeline. This map is not automatically +// generated by Elasticsearch. +func (s *_ingestPipeline) Meta_(metadata types.MetadataVariant) *_ingestPipeline { + + s.v.Meta_ = *metadata.MetadataCaster() + + return s +} + +// Processors to run immediately after a processor failure. +func (s *_ingestPipeline) OnFailure(onfailures ...types.ProcessorContainerVariant) *_ingestPipeline { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Processors used to perform transformations on documents before indexing. +// Processors run sequentially in the order specified. +func (s *_ingestPipeline) Processors(processors ...types.ProcessorContainerVariant) *_ingestPipeline { + + for _, v := range processors { + + s.v.Processors = append(s.v.Processors, *v.ProcessorContainerCaster()) + + } + return s +} + +// Version number used by external systems to track ingest pipelines. +func (s *_ingestPipeline) Version(versionnumber int64) *_ingestPipeline { + + s.v.Version = &versionnumber + + return s +} + +func (s *_ingestPipeline) IngestPipelineCaster() *types.IngestPipeline { + return s.v +} diff --git a/typedapi/esdsl/ingestpipelineparams.go b/typedapi/esdsl/ingestpipelineparams.go new file mode 100644 index 0000000000..c38075a772 --- /dev/null +++ b/typedapi/esdsl/ingestpipelineparams.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ingestPipelineParams struct { + v *types.IngestPipelineParams +} + +func NewIngestPipelineParams(extractbinarycontent bool, name string, reducewhitespace bool, runmlinference bool) *_ingestPipelineParams { + + tmp := &_ingestPipelineParams{v: types.NewIngestPipelineParams()} + + tmp.ExtractBinaryContent(extractbinarycontent) + + tmp.Name(name) + + tmp.ReduceWhitespace(reducewhitespace) + + tmp.RunMlInference(runmlinference) + + return tmp + +} + +func (s *_ingestPipelineParams) ExtractBinaryContent(extractbinarycontent bool) *_ingestPipelineParams { + + s.v.ExtractBinaryContent = extractbinarycontent + + return s +} + +func (s *_ingestPipelineParams) Name(name string) *_ingestPipelineParams { + + s.v.Name = name + + return s +} + +func (s *_ingestPipelineParams) ReduceWhitespace(reducewhitespace bool) *_ingestPipelineParams { + + s.v.ReduceWhitespace = reducewhitespace + + return s +} + +func (s *_ingestPipelineParams) RunMlInference(runmlinference 
bool) *_ingestPipelineParams { + + s.v.RunMlInference = runmlinference + + return s +} + +func (s *_ingestPipelineParams) IngestPipelineParamsCaster() *types.IngestPipelineParams { + return s.v +} diff --git a/typedapi/esdsl/innerhits.go b/typedapi/esdsl/innerhits.go new file mode 100644 index 0000000000..b897ddef20 --- /dev/null +++ b/typedapi/esdsl/innerhits.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _innerHits struct { + v *types.InnerHits +} + +func NewInnerHits() *_innerHits { + + return &_innerHits{v: types.NewInnerHits()} + +} + +func (s *_innerHits) Collapse(collapse types.FieldCollapseVariant) *_innerHits { + + s.v.Collapse = collapse.FieldCollapseCaster() + + return s +} + +func (s *_innerHits) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *_innerHits { + + for _, v := range docvaluefields { + + s.v.DocvalueFields = append(s.v.DocvalueFields, *v.FieldAndFormatCaster()) + + } + return s +} + +func (s *_innerHits) Explain(explain bool) *_innerHits { + + s.v.Explain = &explain + + return s +} + +func (s *_innerHits) Fields(fields ...string) *_innerHits { + + s.v.Fields = fields + + return s +} + +// Inner hit starting document offset. +func (s *_innerHits) From(from int) *_innerHits { + + s.v.From = &from + + return s +} + +func (s *_innerHits) Highlight(highlight types.HighlightVariant) *_innerHits { + + s.v.Highlight = highlight.HighlightCaster() + + return s +} + +func (s *_innerHits) IgnoreUnmapped(ignoreunmapped bool) *_innerHits { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +// The name for the particular inner hit definition in the response. +// Useful when a search request contains multiple inner hits. 
+func (s *_innerHits) Name(name string) *_innerHits { + + s.v.Name = &name + + return s +} + +func (s *_innerHits) ScriptFields(scriptfields map[string]types.ScriptField) *_innerHits { + + s.v.ScriptFields = scriptfields + return s +} + +func (s *_innerHits) AddScriptField(key string, value types.ScriptFieldVariant) *_innerHits { + + var tmp map[string]types.ScriptField + if s.v.ScriptFields == nil { + s.v.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = s.v.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + s.v.ScriptFields = tmp + return s +} + +func (s *_innerHits) SeqNoPrimaryTerm(seqnoprimaryterm bool) *_innerHits { + + s.v.SeqNoPrimaryTerm = &seqnoprimaryterm + + return s +} + +// The maximum number of hits to return per `inner_hits`. +func (s *_innerHits) Size(size int) *_innerHits { + + s.v.Size = &size + + return s +} + +// How the inner hits should be sorted per `inner_hits`. +// By default, inner hits are sorted by score. +func (s *_innerHits) Sort(sorts ...types.SortCombinationsVariant) *_innerHits { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +func (s *_innerHits) Source_(sourceconfig types.SourceConfigVariant) *_innerHits { + + s.v.Source_ = *sourceconfig.SourceConfigCaster() + + return s +} + +func (s *_innerHits) StoredFields(fields ...string) *_innerHits { + + s.v.StoredFields = fields + + return s +} + +func (s *_innerHits) TrackScores(trackscores bool) *_innerHits { + + s.v.TrackScores = &trackscores + + return s +} + +func (s *_innerHits) Version(version bool) *_innerHits { + + s.v.Version = &version + + return s +} + +func (s *_innerHits) InnerHitsCaster() *types.InnerHits { + return s.v +} diff --git a/typedapi/esdsl/input.go b/typedapi/esdsl/input.go new file mode 100644 index 0000000000..a9371a904e --- /dev/null +++ b/typedapi/esdsl/input.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _input struct { + v *types.Input +} + +func NewInput() *_input { + + return &_input{v: types.NewInput()} + +} + +func (s *_input) FieldNames(names ...string) *_input { + + s.v.FieldNames = names + + return s +} + +func (s *_input) InputCaster() *types.Input { + return s.v +} diff --git a/typedapi/esdsl/integernumberproperty.go b/typedapi/esdsl/integernumberproperty.go new file mode 100644 index 0000000000..4a281401ea --- /dev/null +++ b/typedapi/esdsl/integernumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _integerNumberProperty struct { + v *types.IntegerNumberProperty +} + +func NewIntegerNumberProperty() *_integerNumberProperty { + + return &_integerNumberProperty{v: types.NewIntegerNumberProperty()} + +} + +func (s *_integerNumberProperty) Boost(boost types.Float64) *_integerNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_integerNumberProperty) Coerce(coerce bool) *_integerNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_integerNumberProperty) CopyTo(fields ...string) *_integerNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_integerNumberProperty) DocValues(docvalues bool) *_integerNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_integerNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_integerNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_integerNumberProperty) Fields(fields map[string]types.Property) 
*_integerNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_integerNumberProperty) AddField(key string, value types.PropertyVariant) *_integerNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_integerNumberProperty) IgnoreAbove(ignoreabove int) *_integerNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_integerNumberProperty) IgnoreMalformed(ignoremalformed bool) *_integerNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_integerNumberProperty) Index(index bool) *_integerNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_integerNumberProperty) Meta(meta map[string]string) *_integerNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_integerNumberProperty) AddMeta(key string, value string) *_integerNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_integerNumberProperty) NullValue(nullvalue int) *_integerNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_integerNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_integerNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_integerNumberProperty) Properties(properties map[string]types.Property) *_integerNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_integerNumberProperty) AddProperty(key string, value types.PropertyVariant) *_integerNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = 
*value.PropertyCaster()
+
+	s.v.Properties = tmp
+	return s
+}
+
+func (s *_integerNumberProperty) Script(script types.ScriptVariant) *_integerNumberProperty {
+
+	s.v.Script = script.ScriptCaster()
+
+	return s
+}
+
+func (s *_integerNumberProperty) Store(store bool) *_integerNumberProperty {
+
+	s.v.Store = &store
+
+	return s
+}
+
+func (s *_integerNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_integerNumberProperty {
+
+	s.v.SyntheticSourceKeep = &syntheticsourcekeep
+	return s
+}
+
+// For internal use by Elastic only. Marks the field as a time series dimension.
+// Defaults to false.
+func (s *_integerNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_integerNumberProperty {
+
+	s.v.TimeSeriesDimension = &timeseriesdimension
+
+	return s
+}
+
+// For internal use by Elastic only. Marks the field as a time series metric.
+// Defaults to false.
+func (s *_integerNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_integerNumberProperty {
+
+	s.v.TimeSeriesMetric = &timeseriesmetric
+	return s
+}
+
+func (s *_integerNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate {
+	container := types.NewDynamicTemplate()
+
+	container.Mapping = s.v
+
+	return container
+}
+
+func (s *_integerNumberProperty) IntegerNumberPropertyCaster() *types.IntegerNumberProperty {
+	return s.v
+}
diff --git a/typedapi/esdsl/integerrangeproperty.go b/typedapi/esdsl/integerrangeproperty.go
new file mode 100644
index 0000000000..1aa0b11743
--- /dev/null
+++ b/typedapi/esdsl/integerrangeproperty.go
@@ -0,0 +1,174 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _integerRangeProperty struct { + v *types.IntegerRangeProperty +} + +func NewIntegerRangeProperty() *_integerRangeProperty { + + return &_integerRangeProperty{v: types.NewIntegerRangeProperty()} + +} + +func (s *_integerRangeProperty) Boost(boost types.Float64) *_integerRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_integerRangeProperty) Coerce(coerce bool) *_integerRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_integerRangeProperty) CopyTo(fields ...string) *_integerRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_integerRangeProperty) DocValues(docvalues bool) *_integerRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_integerRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_integerRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_integerRangeProperty) Fields(fields map[string]types.Property) *_integerRangeProperty { + + s.v.Fields = 
fields + return s +} + +func (s *_integerRangeProperty) AddField(key string, value types.PropertyVariant) *_integerRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_integerRangeProperty) IgnoreAbove(ignoreabove int) *_integerRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_integerRangeProperty) Index(index bool) *_integerRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_integerRangeProperty) Meta(meta map[string]string) *_integerRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_integerRangeProperty) AddMeta(key string, value string) *_integerRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_integerRangeProperty) Properties(properties map[string]types.Property) *_integerRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_integerRangeProperty) AddProperty(key string, value types.PropertyVariant) *_integerRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_integerRangeProperty) Store(store bool) *_integerRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_integerRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_integerRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_integerRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + 
container.Mapping = s.v
+
+	return container
+}
+
+func (s *_integerRangeProperty) IntegerRangePropertyCaster() *types.IntegerRangeProperty {
+	return s.v
+}
diff --git a/typedapi/esdsl/intervals.go b/typedapi/esdsl/intervals.go
new file mode 100644
index 0000000000..ebc8b26217
--- /dev/null
+++ b/typedapi/esdsl/intervals.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _intervals struct {
+	v *types.Intervals
+}
+
+func NewIntervals() *_intervals {
+	return &_intervals{v: types.NewIntervals()}
+}
+
+// AdditionalIntervalsProperty is a single key dictionary.
+// It will replace the current value on each call.
+func (s *_intervals) AdditionalIntervalsProperty(key string, value json.RawMessage) *_intervals {
+
+	tmp := make(map[string]json.RawMessage)
+
+	tmp[key] = value
+
+	s.v.AdditionalIntervalsProperty = tmp
+	return s
+}
+
+// Returns matches that span a combination of other rules.
+func (s *_intervals) AllOf(allof types.IntervalsAllOfVariant) *_intervals { + + s.v.AllOf = allof.IntervalsAllOfCaster() + + return s +} + +// Returns intervals produced by any of its sub-rules. +func (s *_intervals) AnyOf(anyof types.IntervalsAnyOfVariant) *_intervals { + + s.v.AnyOf = anyof.IntervalsAnyOfCaster() + + return s +} + +// Matches analyzed text. +func (s *_intervals) Fuzzy(fuzzy types.IntervalsFuzzyVariant) *_intervals { + + s.v.Fuzzy = fuzzy.IntervalsFuzzyCaster() + + return s +} + +// Matches analyzed text. +func (s *_intervals) Match(match types.IntervalsMatchVariant) *_intervals { + + s.v.Match = match.IntervalsMatchCaster() + + return s +} + +// Matches terms that start with a specified set of characters. +func (s *_intervals) Prefix(prefix types.IntervalsPrefixVariant) *_intervals { + + s.v.Prefix = prefix.IntervalsPrefixCaster() + + return s +} + +// Matches terms using a wildcard pattern. +func (s *_intervals) Wildcard(wildcard types.IntervalsWildcardVariant) *_intervals { + + s.v.Wildcard = wildcard.IntervalsWildcardCaster() + + return s +} + +func (s *_intervals) IntervalsCaster() *types.Intervals { + return s.v +} diff --git a/typedapi/esdsl/intervalsallof.go b/typedapi/esdsl/intervalsallof.go new file mode 100644 index 0000000000..de5048318d --- /dev/null +++ b/typedapi/esdsl/intervalsallof.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsAllOf struct { + v *types.IntervalsAllOf +} + +// Returns matches that span a combination of other rules. +func NewIntervalsAllOf() *_intervalsAllOf { + + return &_intervalsAllOf{v: types.NewIntervalsAllOf()} + +} + +// Rule used to filter returned intervals. +func (s *_intervalsAllOf) Filter(filter types.IntervalsFilterVariant) *_intervalsAllOf { + + s.v.Filter = filter.IntervalsFilterCaster() + + return s +} + +// An array of rules to combine. All rules must produce a match in a document +// for the overall source to match. +func (s *_intervalsAllOf) Intervals(intervals ...types.IntervalsVariant) *_intervalsAllOf { + + for _, v := range intervals { + + s.v.Intervals = append(s.v.Intervals, *v.IntervalsCaster()) + + } + return s +} + +// Maximum number of positions between the matching terms. +// Intervals produced by the rules further apart than this are not considered +// matches. +func (s *_intervalsAllOf) MaxGaps(maxgaps int) *_intervalsAllOf { + + s.v.MaxGaps = &maxgaps + + return s +} + +// If `true`, intervals produced by the rules should appear in the order in +// which they are specified. 
+func (s *_intervalsAllOf) Ordered(ordered bool) *_intervalsAllOf { + + s.v.Ordered = &ordered + + return s +} + +func (s *_intervalsAllOf) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.AllOf = s.v + + return container +} + +func (s *_intervalsAllOf) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.AllOf = s.v + + return container +} + +func (s *_intervalsAllOf) IntervalsAllOfCaster() *types.IntervalsAllOf { + return s.v +} diff --git a/typedapi/esdsl/intervalsanyof.go b/typedapi/esdsl/intervalsanyof.go new file mode 100644 index 0000000000..4d5b13ad34 --- /dev/null +++ b/typedapi/esdsl/intervalsanyof.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsAnyOf struct { + v *types.IntervalsAnyOf +} + +// Returns intervals produced by any of its sub-rules. 
+func NewIntervalsAnyOf() *_intervalsAnyOf { + + return &_intervalsAnyOf{v: types.NewIntervalsAnyOf()} + +} + +// Rule used to filter returned intervals. +func (s *_intervalsAnyOf) Filter(filter types.IntervalsFilterVariant) *_intervalsAnyOf { + + s.v.Filter = filter.IntervalsFilterCaster() + + return s +} + +// An array of rules to match. +func (s *_intervalsAnyOf) Intervals(intervals ...types.IntervalsVariant) *_intervalsAnyOf { + + for _, v := range intervals { + + s.v.Intervals = append(s.v.Intervals, *v.IntervalsCaster()) + + } + return s +} + +func (s *_intervalsAnyOf) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.AnyOf = s.v + + return container +} + +func (s *_intervalsAnyOf) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.AnyOf = s.v + + return container +} + +func (s *_intervalsAnyOf) IntervalsAnyOfCaster() *types.IntervalsAnyOf { + return s.v +} diff --git a/typedapi/esdsl/intervalsfilter.go b/typedapi/esdsl/intervalsfilter.go new file mode 100644 index 0000000000..9076bb108f --- /dev/null +++ b/typedapi/esdsl/intervalsfilter.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _intervalsFilter struct {
+	v *types.IntervalsFilter
+}
+
+func NewIntervalsFilter() *_intervalsFilter {
+	return &_intervalsFilter{v: types.NewIntervalsFilter()}
+}
+
+// AdditionalIntervalsFilterProperty is a single key dictionary.
+// It will replace the current value on each call.
+func (s *_intervalsFilter) AdditionalIntervalsFilterProperty(key string, value json.RawMessage) *_intervalsFilter {
+
+	tmp := make(map[string]json.RawMessage)
+
+	tmp[key] = value
+
+	s.v.AdditionalIntervalsFilterProperty = tmp
+	return s
+}
+
+// Query used to return intervals that follow an interval from the `filter`
+// rule.
+func (s *_intervalsFilter) After(after types.IntervalsVariant) *_intervalsFilter {
+
+	s.v.After = after.IntervalsCaster()
+
+	return s
+}
+
+// Query used to return intervals that occur before an interval from the
+// `filter` rule.
+func (s *_intervalsFilter) Before(before types.IntervalsVariant) *_intervalsFilter {
+
+	s.v.Before = before.IntervalsCaster()
+
+	return s
+}
+
+// Query used to return intervals contained by an interval from the `filter`
+// rule.
+func (s *_intervalsFilter) ContainedBy(containedby types.IntervalsVariant) *_intervalsFilter {
+
+	s.v.ContainedBy = containedby.IntervalsCaster()
+
+	return s
+}
+
+// Query used to return intervals that contain an interval from the `filter`
+// rule.
+func (s *_intervalsFilter) Containing(containing types.IntervalsVariant) *_intervalsFilter {
+
+	s.v.Containing = containing.IntervalsCaster()
+
+	return s
+}
+
+// Query used to return intervals that are **not** contained by an interval from
+// the `filter` rule.
+func (s *_intervalsFilter) NotContainedBy(notcontainedby types.IntervalsVariant) *_intervalsFilter { + + s.v.NotContainedBy = notcontainedby.IntervalsCaster() + + return s +} + +// Query used to return intervals that do **not** contain an interval from the +// `filter` rule. +func (s *_intervalsFilter) NotContaining(notcontaining types.IntervalsVariant) *_intervalsFilter { + + s.v.NotContaining = notcontaining.IntervalsCaster() + + return s +} + +// Query used to return intervals that do **not** overlap with an interval from +// the `filter` rule. +func (s *_intervalsFilter) NotOverlapping(notoverlapping types.IntervalsVariant) *_intervalsFilter { + + s.v.NotOverlapping = notoverlapping.IntervalsCaster() + + return s +} + +// Query used to return intervals that overlap with an interval from the +// `filter` rule. +func (s *_intervalsFilter) Overlapping(overlapping types.IntervalsVariant) *_intervalsFilter { + + s.v.Overlapping = overlapping.IntervalsCaster() + + return s +} + +// Script used to return matching documents. +// This script must return a boolean value: `true` or `false`. +func (s *_intervalsFilter) Script(script types.ScriptVariant) *_intervalsFilter { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_intervalsFilter) IntervalsFilterCaster() *types.IntervalsFilter { + return s.v +} diff --git a/typedapi/esdsl/intervalsfuzzy.go b/typedapi/esdsl/intervalsfuzzy.go new file mode 100644 index 0000000000..a1902c873c --- /dev/null +++ b/typedapi/esdsl/intervalsfuzzy.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsFuzzy struct { + v *types.IntervalsFuzzy +} + +// Matches analyzed text. +func NewIntervalsFuzzy(term string) *_intervalsFuzzy { + + tmp := &_intervalsFuzzy{v: types.NewIntervalsFuzzy()} + + tmp.Term(term) + + return tmp + +} + +// Analyzer used to normalize the term. +func (s *_intervalsFuzzy) Analyzer(analyzer string) *_intervalsFuzzy { + + s.v.Analyzer = &analyzer + + return s +} + +// Maximum edit distance allowed for matching. +func (s *_intervalsFuzzy) Fuzziness(fuzziness types.FuzzinessVariant) *_intervalsFuzzy { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Number of beginning characters left unchanged when creating expansions. +func (s *_intervalsFuzzy) PrefixLength(prefixlength int) *_intervalsFuzzy { + + s.v.PrefixLength = &prefixlength + + return s +} + +// The term to match. +func (s *_intervalsFuzzy) Term(term string) *_intervalsFuzzy { + + s.v.Term = term + + return s +} + +// Indicates whether edits include transpositions of two adjacent characters +// (for example, `ab` to `ba`). +func (s *_intervalsFuzzy) Transpositions(transpositions bool) *_intervalsFuzzy { + + s.v.Transpositions = &transpositions + + return s +} + +// If specified, match intervals from this field rather than the top-level +// field. 
+// The `term` is normalized using the search analyzer from this field, unless +// `analyzer` is specified separately. +func (s *_intervalsFuzzy) UseField(field string) *_intervalsFuzzy { + + s.v.UseField = &field + + return s +} + +func (s *_intervalsFuzzy) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.Fuzzy = s.v + + return container +} + +func (s *_intervalsFuzzy) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.Fuzzy = s.v + + return container +} + +func (s *_intervalsFuzzy) IntervalsFuzzyCaster() *types.IntervalsFuzzy { + return s.v +} diff --git a/typedapi/esdsl/intervalsmatch.go b/typedapi/esdsl/intervalsmatch.go new file mode 100644 index 0000000000..170559a888 --- /dev/null +++ b/typedapi/esdsl/intervalsmatch.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsMatch struct { + v *types.IntervalsMatch +} + +// Matches analyzed text. 
+func NewIntervalsMatch(query string) *_intervalsMatch { + + tmp := &_intervalsMatch{v: types.NewIntervalsMatch()} + + tmp.Query(query) + + return tmp + +} + +// Analyzer used to analyze terms in the query. +func (s *_intervalsMatch) Analyzer(analyzer string) *_intervalsMatch { + + s.v.Analyzer = &analyzer + + return s +} + +// An optional interval filter. +func (s *_intervalsMatch) Filter(filter types.IntervalsFilterVariant) *_intervalsMatch { + + s.v.Filter = filter.IntervalsFilterCaster() + + return s +} + +// Maximum number of positions between the matching terms. +// Terms further apart than this are not considered matches. +func (s *_intervalsMatch) MaxGaps(maxgaps int) *_intervalsMatch { + + s.v.MaxGaps = &maxgaps + + return s +} + +// If `true`, matching terms must appear in their specified order. +func (s *_intervalsMatch) Ordered(ordered bool) *_intervalsMatch { + + s.v.Ordered = &ordered + + return s +} + +// Text you wish to find in the provided field. +func (s *_intervalsMatch) Query(query string) *_intervalsMatch { + + s.v.Query = query + + return s +} + +// If specified, match intervals from this field rather than the top-level +// field. +// The `term` is normalized using the search analyzer from this field, unless +// `analyzer` is specified separately. 
+func (s *_intervalsMatch) UseField(field string) *_intervalsMatch { + + s.v.UseField = &field + + return s +} + +func (s *_intervalsMatch) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.Match = s.v + + return container +} + +func (s *_intervalsMatch) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.Match = s.v + + return container +} + +func (s *_intervalsMatch) IntervalsMatchCaster() *types.IntervalsMatch { + return s.v +} diff --git a/typedapi/esdsl/intervalsprefix.go b/typedapi/esdsl/intervalsprefix.go new file mode 100644 index 0000000000..8d2fa707d6 --- /dev/null +++ b/typedapi/esdsl/intervalsprefix.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsPrefix struct { + v *types.IntervalsPrefix +} + +// Matches terms that start with a specified set of characters. 
+func NewIntervalsPrefix(prefix string) *_intervalsPrefix { + + tmp := &_intervalsPrefix{v: types.NewIntervalsPrefix()} + + tmp.Prefix(prefix) + + return tmp + +} + +// Analyzer used to analyze the `prefix`. +func (s *_intervalsPrefix) Analyzer(analyzer string) *_intervalsPrefix { + + s.v.Analyzer = &analyzer + + return s +} + +// Beginning characters of terms you wish to find in the top-level field. +func (s *_intervalsPrefix) Prefix(prefix string) *_intervalsPrefix { + + s.v.Prefix = prefix + + return s +} + +// If specified, match intervals from this field rather than the top-level +// field. +// The `prefix` is normalized using the search analyzer from this field, unless +// `analyzer` is specified separately. +func (s *_intervalsPrefix) UseField(field string) *_intervalsPrefix { + + s.v.UseField = &field + + return s +} + +func (s *_intervalsPrefix) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.Prefix = s.v + + return container +} + +func (s *_intervalsPrefix) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.Prefix = s.v + + return container +} + +func (s *_intervalsPrefix) IntervalsPrefixCaster() *types.IntervalsPrefix { + return s.v +} diff --git a/typedapi/esdsl/intervalsquery.go b/typedapi/esdsl/intervalsquery.go new file mode 100644 index 0000000000..0321b57126 --- /dev/null +++ b/typedapi/esdsl/intervalsquery.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _intervalsQuery struct { + v *types.IntervalsQuery +} + +func NewIntervalsQuery() *_intervalsQuery { + return &_intervalsQuery{v: types.NewIntervalsQuery()} +} + +// AdditionalIntervalsQueryProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_intervalsQuery) AdditionalIntervalsQueryProperty(key string, value json.RawMessage) *_intervalsQuery { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalIntervalsQueryProperty = tmp + return s +} + +// Returns matches that span a combination of other rules. +func (s *_intervalsQuery) AllOf(allof types.IntervalsAllOfVariant) *_intervalsQuery { + + s.v.AllOf = allof.IntervalsAllOfCaster() + + return s +} + +// Returns intervals produced by any of its sub-rules. +func (s *_intervalsQuery) AnyOf(anyof types.IntervalsAnyOfVariant) *_intervalsQuery { + + s.v.AnyOf = anyof.IntervalsAnyOfCaster() + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_intervalsQuery) Boost(boost float32) *_intervalsQuery { + + s.v.Boost = &boost + + return s +} + +// Matches terms that are similar to the provided term, within an edit distance +// defined by `fuzziness`. +func (s *_intervalsQuery) Fuzzy(fuzzy types.IntervalsFuzzyVariant) *_intervalsQuery { + + s.v.Fuzzy = fuzzy.IntervalsFuzzyCaster() + + return s +} + +// Matches analyzed text. +func (s *_intervalsQuery) Match(match types.IntervalsMatchVariant) *_intervalsQuery { + + s.v.Match = match.IntervalsMatchCaster() + + return s +} + +// Matches terms that start with a specified set of characters. +func (s *_intervalsQuery) Prefix(prefix types.IntervalsPrefixVariant) *_intervalsQuery { + + s.v.Prefix = prefix.IntervalsPrefixCaster() + + return s +} + +func (s *_intervalsQuery) QueryName_(queryname_ string) *_intervalsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Matches terms using a wildcard pattern. +func (s *_intervalsQuery) Wildcard(wildcard types.IntervalsWildcardVariant) *_intervalsQuery { + + s.v.Wildcard = wildcard.IntervalsWildcardCaster() + + return s +} + +func (s *_intervalsQuery) IntervalsQueryCaster() *types.IntervalsQuery { + return s.v +} diff --git a/typedapi/esdsl/intervalswildcard.go b/typedapi/esdsl/intervalswildcard.go new file mode 100644 index 0000000000..53c78e9ccd --- /dev/null +++ b/typedapi/esdsl/intervalswildcard.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _intervalsWildcard struct { + v *types.IntervalsWildcard +} + +// Matches terms using a wildcard pattern. +func NewIntervalsWildcard(pattern string) *_intervalsWildcard { + + tmp := &_intervalsWildcard{v: types.NewIntervalsWildcard()} + + tmp.Pattern(pattern) + + return tmp + +} + +// Analyzer used to analyze the `pattern`. +// Defaults to the top-level field's analyzer. +func (s *_intervalsWildcard) Analyzer(analyzer string) *_intervalsWildcard { + + s.v.Analyzer = &analyzer + + return s +} + +// Wildcard pattern used to find matching terms. +func (s *_intervalsWildcard) Pattern(pattern string) *_intervalsWildcard { + + s.v.Pattern = pattern + + return s +} + +// If specified, match intervals from this field rather than the top-level +// field. +// The `pattern` is normalized using the search analyzer from this field, unless +// `analyzer` is specified separately. 
+func (s *_intervalsWildcard) UseField(field string) *_intervalsWildcard { + + s.v.UseField = &field + + return s +} + +func (s *_intervalsWildcard) IntervalsCaster() *types.Intervals { + container := types.NewIntervals() + + container.Wildcard = s.v + + return container +} + +func (s *_intervalsWildcard) IntervalsQueryCaster() *types.IntervalsQuery { + container := types.NewIntervalsQuery() + + container.Wildcard = s.v + + return container +} + +func (s *_intervalsWildcard) IntervalsWildcardCaster() *types.IntervalsWildcard { + return s.v +} diff --git a/typedapi/esdsl/ipinfo.go b/typedapi/esdsl/ipinfo.go new file mode 100644 index 0000000000..5e0c21d8e5 --- /dev/null +++ b/typedapi/esdsl/ipinfo.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ipinfo struct { + v *types.Ipinfo +} + +func NewIpinfo() *_ipinfo { + + return &_ipinfo{v: types.NewIpinfo()} + +} + +func (s *_ipinfo) DatabaseConfigurationCaster() *types.DatabaseConfiguration { + container := types.NewDatabaseConfiguration() + + container.Ipinfo = s.v + + return container +} + +func (s *_ipinfo) DatabaseConfigurationFullCaster() *types.DatabaseConfigurationFull { + container := types.NewDatabaseConfigurationFull() + + container.Ipinfo = s.v + + return container +} + +func (s *_ipinfo) IpinfoCaster() *types.Ipinfo { + return s.v +} diff --git a/typedapi/esdsl/iplocationprocessor.go b/typedapi/esdsl/iplocationprocessor.go new file mode 100644 index 0000000000..00ca94235b --- /dev/null +++ b/typedapi/esdsl/iplocationprocessor.go @@ -0,0 +1,159 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ipLocationProcessor struct { + v *types.IpLocationProcessor +} + +// Currently an undocumented alias for GeoIP Processor. +func NewIpLocationProcessor() *_ipLocationProcessor { + + return &_ipLocationProcessor{v: types.NewIpLocationProcessor()} + +} + +// The database filename referring to a database the module ships with +// (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom +// database in the ingest-geoip config directory. +func (s *_ipLocationProcessor) DatabaseFile(databasefile string) *_ipLocationProcessor { + + s.v.DatabaseFile = &databasefile + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_ipLocationProcessor) Description(description string) *_ipLocationProcessor { + + s.v.Description = &description + + return s +} + +// If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the +// missing database is downloaded when the pipeline is created. +// Else, the download is triggered by when the pipeline is used as the +// `default_pipeline` or `final_pipeline` in an index. +func (s *_ipLocationProcessor) DownloadDatabaseOnPipelineCreation(downloaddatabaseonpipelinecreation bool) *_ipLocationProcessor { + + s.v.DownloadDatabaseOnPipelineCreation = &downloaddatabaseonpipelinecreation + + return s +} + +// The field to get the ip address from for the geographical lookup. +func (s *_ipLocationProcessor) Field(field string) *_ipLocationProcessor { + + s.v.Field = field + + return s +} + +// If `true`, only the first found IP location data will be returned, even if +// the field contains an array. 
+func (s *_ipLocationProcessor) FirstOnly(firstonly bool) *_ipLocationProcessor { + + s.v.FirstOnly = &firstonly + + return s +} + +// Conditionally execute the processor. +func (s *_ipLocationProcessor) If(if_ types.ScriptVariant) *_ipLocationProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_ipLocationProcessor) IgnoreFailure(ignorefailure bool) *_ipLocationProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_ipLocationProcessor) IgnoreMissing(ignoremissing bool) *_ipLocationProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_ipLocationProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_ipLocationProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Controls what properties are added to the `target_field` based on the IP +// location lookup. +func (s *_ipLocationProcessor) Properties(properties ...string) *_ipLocationProcessor { + + for _, v := range properties { + + s.v.Properties = append(s.v.Properties, v) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_ipLocationProcessor) Tag(tag string) *_ipLocationProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that will hold the geographical information looked up from the +// MaxMind database. 
+func (s *_ipLocationProcessor) TargetField(field string) *_ipLocationProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_ipLocationProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.IpLocation = s.v + + return container +} + +func (s *_ipLocationProcessor) IpLocationProcessorCaster() *types.IpLocationProcessor { + return s.v +} diff --git a/typedapi/esdsl/ipprefixaggregation.go b/typedapi/esdsl/ipprefixaggregation.go new file mode 100644 index 0000000000..07228effc4 --- /dev/null +++ b/typedapi/esdsl/ipprefixaggregation.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ipPrefixAggregation struct { + v *types.IpPrefixAggregation +} + +// A bucket aggregation that groups documents based on the network or +// sub-network of an IP address. 
+func NewIpPrefixAggregation(prefixlength int) *_ipPrefixAggregation { + + tmp := &_ipPrefixAggregation{v: types.NewIpPrefixAggregation()} + + tmp.PrefixLength(prefixlength) + + return tmp + +} + +// Defines whether the prefix length is appended to IP address keys in the +// response. +func (s *_ipPrefixAggregation) AppendPrefixLength(appendprefixlength bool) *_ipPrefixAggregation { + + s.v.AppendPrefixLength = &appendprefixlength + + return s +} + +// The IP address field to aggregation on. The field mapping type must be `ip`. +func (s *_ipPrefixAggregation) Field(field string) *_ipPrefixAggregation { + + s.v.Field = field + + return s +} + +// Defines whether the prefix applies to IPv6 addresses. +func (s *_ipPrefixAggregation) IsIpv6(isipv6 bool) *_ipPrefixAggregation { + + s.v.IsIpv6 = &isipv6 + + return s +} + +// Defines whether buckets are returned as a hash rather than an array in the +// response. +func (s *_ipPrefixAggregation) Keyed(keyed bool) *_ipPrefixAggregation { + + s.v.Keyed = &keyed + + return s +} + +// Minimum number of documents in a bucket for it to be included in the +// response. +func (s *_ipPrefixAggregation) MinDocCount(mindoccount int64) *_ipPrefixAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// Length of the network prefix. For IPv4 addresses the accepted range is [0, +// 32]. +// For IPv6 addresses the accepted range is [0, 128]. 
+func (s *_ipPrefixAggregation) PrefixLength(prefixlength int) *_ipPrefixAggregation { + + s.v.PrefixLength = prefixlength + + return s +} + +func (s *_ipPrefixAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.IpPrefix = s.v + + return container +} + +func (s *_ipPrefixAggregation) IpPrefixAggregationCaster() *types.IpPrefixAggregation { + return s.v +} diff --git a/typedapi/esdsl/ipproperty.go b/typedapi/esdsl/ipproperty.go new file mode 100644 index 0000000000..3b43d42b96 --- /dev/null +++ b/typedapi/esdsl/ipproperty.go @@ -0,0 +1,204 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _ipProperty struct { + v *types.IpProperty +} + +func NewIpProperty() *_ipProperty { + + return &_ipProperty{v: types.NewIpProperty()} + +} + +func (s *_ipProperty) Boost(boost types.Float64) *_ipProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_ipProperty) CopyTo(fields ...string) *_ipProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_ipProperty) DocValues(docvalues bool) *_ipProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_ipProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_ipProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_ipProperty) Fields(fields map[string]types.Property) *_ipProperty { + + s.v.Fields = fields + return s +} + +func (s *_ipProperty) AddField(key string, value types.PropertyVariant) *_ipProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_ipProperty) IgnoreAbove(ignoreabove int) *_ipProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_ipProperty) IgnoreMalformed(ignoremalformed bool) *_ipProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_ipProperty) Index(index bool) *_ipProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_ipProperty) Meta(meta map[string]string) *_ipProperty { + + s.v.Meta = meta + return s +} + +func (s *_ipProperty) AddMeta(key string, value string) *_ipProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_ipProperty) NullValue(nullvalue string) *_ipProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_ipProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_ipProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_ipProperty) Properties(properties map[string]types.Property) *_ipProperty { + + s.v.Properties = properties + return s +} + +func (s *_ipProperty) AddProperty(key string, value types.PropertyVariant) *_ipProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_ipProperty) Script(script types.ScriptVariant) *_ipProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_ipProperty) Store(store bool) *_ipProperty { + + s.v.Store = &store + + return s +} + +func (s *_ipProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_ipProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. 
+func (s *_ipProperty) TimeSeriesDimension(timeseriesdimension bool) *_ipProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +func (s *_ipProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_ipProperty) IpPropertyCaster() *types.IpProperty { + return s.v +} diff --git a/typedapi/esdsl/iprangeaggregation.go b/typedapi/esdsl/iprangeaggregation.go new file mode 100644 index 0000000000..d6b31a0531 --- /dev/null +++ b/typedapi/esdsl/iprangeaggregation.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ipRangeAggregation struct { + v *types.IpRangeAggregation +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of IP ranges - each representing a bucket. 
+func NewIpRangeAggregation() *_ipRangeAggregation { + + return &_ipRangeAggregation{v: types.NewIpRangeAggregation()} + +} + +// The date field whose values are used to build ranges. +func (s *_ipRangeAggregation) Field(field string) *_ipRangeAggregation { + + s.v.Field = &field + + return s +} + +// Array of IP ranges. +func (s *_ipRangeAggregation) Ranges(ranges ...types.IpRangeAggregationRangeVariant) *_ipRangeAggregation { + + for _, v := range ranges { + + s.v.Ranges = append(s.v.Ranges, *v.IpRangeAggregationRangeCaster()) + + } + return s +} + +func (s *_ipRangeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.IpRange = s.v + + return container +} + +func (s *_ipRangeAggregation) IpRangeAggregationCaster() *types.IpRangeAggregation { + return s.v +} diff --git a/typedapi/esdsl/iprangeaggregationrange.go b/typedapi/esdsl/iprangeaggregationrange.go new file mode 100644 index 0000000000..08a2af461b --- /dev/null +++ b/typedapi/esdsl/iprangeaggregationrange.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _ipRangeAggregationRange struct { + v *types.IpRangeAggregationRange +} + +func NewIpRangeAggregationRange() *_ipRangeAggregationRange { + + return &_ipRangeAggregationRange{v: types.NewIpRangeAggregationRange()} + +} + +// Start of the range. +func (s *_ipRangeAggregationRange) From(from string) *_ipRangeAggregationRange { + + s.v.From = &from + + return s +} + +// IP range defined as a CIDR mask. +func (s *_ipRangeAggregationRange) Mask(mask string) *_ipRangeAggregationRange { + + s.v.Mask = &mask + + return s +} + +// End of the range. +func (s *_ipRangeAggregationRange) To(to string) *_ipRangeAggregationRange { + + s.v.To = &to + + return s +} + +func (s *_ipRangeAggregationRange) IpRangeAggregationRangeCaster() *types.IpRangeAggregationRange { + return s.v +} diff --git a/typedapi/esdsl/iprangeproperty.go b/typedapi/esdsl/iprangeproperty.go new file mode 100644 index 0000000000..2f5b99d11e --- /dev/null +++ b/typedapi/esdsl/iprangeproperty.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _ipRangeProperty struct { + v *types.IpRangeProperty +} + +func NewIpRangeProperty() *_ipRangeProperty { + + return &_ipRangeProperty{v: types.NewIpRangeProperty()} + +} + +func (s *_ipRangeProperty) Boost(boost types.Float64) *_ipRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_ipRangeProperty) Coerce(coerce bool) *_ipRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_ipRangeProperty) CopyTo(fields ...string) *_ipRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_ipRangeProperty) DocValues(docvalues bool) *_ipRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_ipRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_ipRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_ipRangeProperty) Fields(fields map[string]types.Property) *_ipRangeProperty { + + s.v.Fields = fields + return s +} + +func (s *_ipRangeProperty) AddField(key string, value types.PropertyVariant) *_ipRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_ipRangeProperty) IgnoreAbove(ignoreabove int) *_ipRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_ipRangeProperty) Index(index bool) *_ipRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_ipRangeProperty) Meta(meta map[string]string) *_ipRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_ipRangeProperty) AddMeta(key string, value string) *_ipRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_ipRangeProperty) Properties(properties map[string]types.Property) *_ipRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_ipRangeProperty) AddProperty(key string, value types.PropertyVariant) *_ipRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_ipRangeProperty) Store(store bool) *_ipRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_ipRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_ipRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_ipRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_ipRangeProperty) IpRangePropertyCaster() *types.IpRangeProperty { + return s.v +} diff --git a/typedapi/esdsl/irishanalyzer.go b/typedapi/esdsl/irishanalyzer.go new file mode 100644 index 0000000000..a13f2bf224 --- /dev/null +++ b/typedapi/esdsl/irishanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _irishAnalyzer struct { + v *types.IrishAnalyzer +} + +func NewIrishAnalyzer() *_irishAnalyzer { + + return &_irishAnalyzer{v: types.NewIrishAnalyzer()} + +} + +func (s *_irishAnalyzer) StemExclusion(stemexclusions ...string) *_irishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_irishAnalyzer) Stopwords(stopwords ...string) *_irishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_irishAnalyzer) StopwordsPath(stopwordspath string) *_irishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_irishAnalyzer) IrishAnalyzerCaster() *types.IrishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/italiananalyzer.go b/typedapi/esdsl/italiananalyzer.go new file mode 100644 index 0000000000..8ff34f9d93 --- /dev/null +++ b/typedapi/esdsl/italiananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _italianAnalyzer struct { + v *types.ItalianAnalyzer +} + +func NewItalianAnalyzer() *_italianAnalyzer { + + return &_italianAnalyzer{v: types.NewItalianAnalyzer()} + +} + +func (s *_italianAnalyzer) StemExclusion(stemexclusions ...string) *_italianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_italianAnalyzer) Stopwords(stopwords ...string) *_italianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_italianAnalyzer) StopwordsPath(stopwordspath string) *_italianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_italianAnalyzer) ItalianAnalyzerCaster() *types.ItalianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/jobconfig.go b/typedapi/esdsl/jobconfig.go new file mode 100644 index 0000000000..f9138fda71 --- /dev/null +++ b/typedapi/esdsl/jobconfig.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _jobConfig struct { + v *types.JobConfig +} + +func NewJobConfig(analysisconfig types.AnalysisConfigVariant, datadescription types.DataDescriptionVariant) *_jobConfig { + + tmp := &_jobConfig{v: types.NewJobConfig()} + + tmp.AnalysisConfig(analysisconfig) + + tmp.DataDescription(datadescription) + + return tmp + +} + +// Advanced configuration option. Specifies whether this job can open when there +// is insufficient machine learning node capacity for it to be immediately +// assigned to a node. +func (s *_jobConfig) AllowLazyOpen(allowlazyopen bool) *_jobConfig { + + s.v.AllowLazyOpen = &allowlazyopen + + return s +} + +// The analysis configuration, which specifies how to analyze the data. +// After you create a job, you cannot change the analysis configuration; all the +// properties are informational. +func (s *_jobConfig) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *_jobConfig { + + s.v.AnalysisConfig = *analysisconfig.AnalysisConfigCaster() + + return s +} + +// Limits can be applied for the resources required to hold the mathematical +// models in memory. 
+// These limits are approximate and can be set per job. +// They do not control the memory used by other processes, for example the +// Elasticsearch Java processes. +func (s *_jobConfig) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *_jobConfig { + + s.v.AnalysisLimits = analysislimits.AnalysisLimitsCaster() + + return s +} + +// Advanced configuration option. +// The time between each periodic persistence of the model. +// The default value is a randomized value between 3 to 4 hours, which avoids +// all jobs persisting at exactly the same time. +// The smallest allowed value is 1 hour. +func (s *_jobConfig) BackgroundPersistInterval(duration types.DurationVariant) *_jobConfig { + + s.v.BackgroundPersistInterval = *duration.DurationCaster() + + return s +} + +// Advanced configuration option. +// Contains custom metadata about the job. +func (s *_jobConfig) CustomSettings(customsettings json.RawMessage) *_jobConfig { + + s.v.CustomSettings = customsettings + + return s +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. +// It specifies a period of time (in days) after which only the first snapshot +// per day is retained. +// This period is relative to the timestamp of the most recent snapshot for this +// job. +func (s *_jobConfig) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *_jobConfig { + + s.v.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays + + return s +} + +// The data description defines the format of the input data when you send data +// to the job by using the post data API. +// Note that when configure a datafeed, these properties are automatically set. 
+func (s *_jobConfig) DataDescription(datadescription types.DataDescriptionVariant) *_jobConfig { + + s.v.DataDescription = *datadescription.DataDescriptionCaster() + + return s +} + +// The datafeed, which retrieves data from Elasticsearch for analysis by the +// job. +// You can associate only one datafeed with each anomaly detection job. +func (s *_jobConfig) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *_jobConfig { + + s.v.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() + + return s +} + +// A description of the job. +func (s *_jobConfig) Description(description string) *_jobConfig { + + s.v.Description = &description + + return s +} + +// A list of job groups. A job can belong to no groups or many. +func (s *_jobConfig) Groups(groups ...string) *_jobConfig { + + for _, v := range groups { + + s.v.Groups = append(s.v.Groups, v) + + } + return s +} + +// Identifier for the anomaly detection job. +// This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. +// It must start and end with alphanumeric characters. +func (s *_jobConfig) JobId(id string) *_jobConfig { + + s.v.JobId = &id + + return s +} + +// Reserved for future use, currently set to `anomaly_detector`. +func (s *_jobConfig) JobType(jobtype string) *_jobConfig { + + s.v.JobType = &jobtype + + return s +} + +// This advanced configuration option stores model information along with the +// results. +// It provides a more detailed view into anomaly detection. +// Model plot provides a simplified and indicative view of the model and its +// bounds. +func (s *_jobConfig) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *_jobConfig { + + s.v.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() + + return s +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. +// It specifies the maximum period of time (in days) that snapshots are +// retained. 
+// This period is relative to the timestamp of the most recent snapshot for this +// job. +// The default value is `10`, which means snapshots ten days older than the +// newest snapshot are deleted. +func (s *_jobConfig) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *_jobConfig { + + s.v.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return s +} + +// Advanced configuration option. +// The period over which adjustments to the score are applied, as new data is +// seen. +// The default value is the longer of 30 days or 100 `bucket_spans`. +func (s *_jobConfig) RenormalizationWindowDays(renormalizationwindowdays int64) *_jobConfig { + + s.v.RenormalizationWindowDays = &renormalizationwindowdays + + return s +} + +// A text string that affects the name of the machine learning results index. +// The default value is `shared`, which generates an index named +// `.ml-anomalies-shared`. +func (s *_jobConfig) ResultsIndexName(indexname string) *_jobConfig { + + s.v.ResultsIndexName = &indexname + + return s +} + +// Advanced configuration option. +// The period of time (in days) that results are retained. +// Age is calculated relative to the timestamp of the latest bucket result. +// If this property has a non-null value, once per day at 00:30 (server time), +// results that are the specified number of days older than the latest bucket +// result are deleted from Elasticsearch. +// The default value is null, which means all results are retained. +// Annotations generated by the system also count as results for retention +// purposes; they are deleted after the same number of days as results. +// Annotations added by users are retained forever. 
+func (s *_jobConfig) ResultsRetentionDays(resultsretentiondays int64) *_jobConfig { + + s.v.ResultsRetentionDays = &resultsretentiondays + + return s +} + +func (s *_jobConfig) JobConfigCaster() *types.JobConfig { + return s.v +} diff --git a/typedapi/esdsl/joinprocessor.go b/typedapi/esdsl/joinprocessor.go new file mode 100644 index 0000000000..e322c551f8 --- /dev/null +++ b/typedapi/esdsl/joinprocessor.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _joinProcessor struct { + v *types.JoinProcessor +} + +// Joins each element of an array into a single string using a separator +// character between each element. +// Throws an error when the field is not an array. +func NewJoinProcessor(separator string) *_joinProcessor { + + tmp := &_joinProcessor{v: types.NewJoinProcessor()} + + tmp.Separator(separator) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. 
+func (s *_joinProcessor) Description(description string) *_joinProcessor { + + s.v.Description = &description + + return s +} + +// Field containing array values to join. +func (s *_joinProcessor) Field(field string) *_joinProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_joinProcessor) If(if_ types.ScriptVariant) *_joinProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_joinProcessor) IgnoreFailure(ignorefailure bool) *_joinProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_joinProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_joinProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// The separator character. +func (s *_joinProcessor) Separator(separator string) *_joinProcessor { + + s.v.Separator = separator + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_joinProcessor) Tag(tag string) *_joinProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the joined value to. +// By default, the field is updated in-place. +func (s *_joinProcessor) TargetField(field string) *_joinProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_joinProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Join = s.v + + return container +} + +func (s *_joinProcessor) JoinProcessorCaster() *types.JoinProcessor { + return s.v +} diff --git a/typedapi/esdsl/joinproperty.go b/typedapi/esdsl/joinproperty.go new file mode 100644 index 0000000000..eebb6659aa --- /dev/null +++ b/typedapi/esdsl/joinproperty.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _joinProperty struct { + v *types.JoinProperty +} + +func NewJoinProperty() *_joinProperty { + + return &_joinProperty{v: types.NewJoinProperty()} + +} + +func (s *_joinProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_joinProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_joinProperty) EagerGlobalOrdinals(eagerglobalordinals bool) *_joinProperty { + + s.v.EagerGlobalOrdinals = &eagerglobalordinals + + return s +} + +func (s *_joinProperty) Fields(fields map[string]types.Property) *_joinProperty { + + s.v.Fields = fields + return s +} + +func (s *_joinProperty) AddField(key string, value types.PropertyVariant) *_joinProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + 
+ s.v.Fields = tmp + return s +} + +func (s *_joinProperty) IgnoreAbove(ignoreabove int) *_joinProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. +func (s *_joinProperty) Meta(meta map[string]string) *_joinProperty { + + s.v.Meta = meta + return s +} + +func (s *_joinProperty) AddMeta(key string, value string) *_joinProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_joinProperty) Properties(properties map[string]types.Property) *_joinProperty { + + s.v.Properties = properties + return s +} + +func (s *_joinProperty) AddProperty(key string, value types.PropertyVariant) *_joinProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_joinProperty) Relations(relations map[string][]string) *_joinProperty { + + s.v.Relations = relations + return s +} + +func (s *_joinProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_joinProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_joinProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_joinProperty) JoinPropertyCaster() *types.JoinProperty { + return s.v +} diff --git a/typedapi/esdsl/jsonprocessor.go b/typedapi/esdsl/jsonprocessor.go new file mode 100644 index 0000000000..54e4301dd1 --- /dev/null +++ b/typedapi/esdsl/jsonprocessor.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy" +) + +type _jsonProcessor struct { + v *types.JsonProcessor +} + +// Converts a JSON string into a structured JSON object. +func NewJsonProcessor() *_jsonProcessor { + + return &_jsonProcessor{v: types.NewJsonProcessor()} + +} + +// Flag that forces the parsed JSON to be added at the top level of the +// document. +// `target_field` must not be set when this option is chosen. +func (s *_jsonProcessor) AddToRoot(addtoroot bool) *_jsonProcessor { + + s.v.AddToRoot = &addtoroot + + return s +} + +// When set to `replace`, root fields that conflict with fields from the parsed +// JSON will be overridden. +// When set to `merge`, conflicting fields will be merged. +// Only applicable `if add_to_root` is set to true. 
+func (s *_jsonProcessor) AddToRootConflictStrategy(addtorootconflictstrategy jsonprocessorconflictstrategy.JsonProcessorConflictStrategy) *_jsonProcessor { + + s.v.AddToRootConflictStrategy = &addtorootconflictstrategy + return s +} + +// When set to `true`, the JSON parser will not fail if the JSON contains +// duplicate keys. +// Instead, the last encountered value for any duplicate key wins. +func (s *_jsonProcessor) AllowDuplicateKeys(allowduplicatekeys bool) *_jsonProcessor { + + s.v.AllowDuplicateKeys = &allowduplicatekeys + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_jsonProcessor) Description(description string) *_jsonProcessor { + + s.v.Description = &description + + return s +} + +// The field to be parsed. +func (s *_jsonProcessor) Field(field string) *_jsonProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_jsonProcessor) If(if_ types.ScriptVariant) *_jsonProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_jsonProcessor) IgnoreFailure(ignorefailure bool) *_jsonProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_jsonProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_jsonProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_jsonProcessor) Tag(tag string) *_jsonProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that the converted structured object will be written into. +// Any existing content in this field will be overwritten. 
+func (s *_jsonProcessor) TargetField(field string) *_jsonProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_jsonProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Json = s.v + + return container +} + +func (s *_jsonProcessor) JsonProcessorCaster() *types.JsonProcessor { + return s.v +} diff --git a/typedapi/esdsl/keeptypestokenfilter.go b/typedapi/esdsl/keeptypestokenfilter.go new file mode 100644 index 0000000000..5cea2241e3 --- /dev/null +++ b/typedapi/esdsl/keeptypestokenfilter.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode" +) + +type _keepTypesTokenFilter struct { + v *types.KeepTypesTokenFilter +} + +func NewKeepTypesTokenFilter() *_keepTypesTokenFilter { + + return &_keepTypesTokenFilter{v: types.NewKeepTypesTokenFilter()} + +} + +func (s *_keepTypesTokenFilter) Mode(mode keeptypesmode.KeepTypesMode) *_keepTypesTokenFilter { + + s.v.Mode = &mode + return s +} + +func (s *_keepTypesTokenFilter) Types(types ...string) *_keepTypesTokenFilter { + + for _, v := range types { + + s.v.Types = append(s.v.Types, v) + + } + return s +} + +func (s *_keepTypesTokenFilter) Version(versionstring string) *_keepTypesTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_keepTypesTokenFilter) KeepTypesTokenFilterCaster() *types.KeepTypesTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/keepwordstokenfilter.go b/typedapi/esdsl/keepwordstokenfilter.go new file mode 100644 index 0000000000..c2162d8e8b --- /dev/null +++ b/typedapi/esdsl/keepwordstokenfilter.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _keepWordsTokenFilter struct { + v *types.KeepWordsTokenFilter +} + +func NewKeepWordsTokenFilter() *_keepWordsTokenFilter { + + return &_keepWordsTokenFilter{v: types.NewKeepWordsTokenFilter()} + +} + +func (s *_keepWordsTokenFilter) KeepWords(keepwords ...string) *_keepWordsTokenFilter { + + for _, v := range keepwords { + + s.v.KeepWords = append(s.v.KeepWords, v) + + } + return s +} + +func (s *_keepWordsTokenFilter) KeepWordsCase(keepwordscase bool) *_keepWordsTokenFilter { + + s.v.KeepWordsCase = &keepwordscase + + return s +} + +func (s *_keepWordsTokenFilter) KeepWordsPath(keepwordspath string) *_keepWordsTokenFilter { + + s.v.KeepWordsPath = &keepwordspath + + return s +} + +func (s *_keepWordsTokenFilter) Version(versionstring string) *_keepWordsTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_keepWordsTokenFilter) KeepWordsTokenFilterCaster() *types.KeepWordsTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/keyvalueprocessor.go b/typedapi/esdsl/keyvalueprocessor.go new file mode 100644 index 0000000000..a08daf9544 --- /dev/null +++ b/typedapi/esdsl/keyvalueprocessor.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _keyValueProcessor struct { + v *types.KeyValueProcessor +} + +// This processor helps automatically parse messages (or specific event fields) +// which are of the `foo=bar` variety. +func NewKeyValueProcessor(fieldsplit string, valuesplit string) *_keyValueProcessor { + + tmp := &_keyValueProcessor{v: types.NewKeyValueProcessor()} + + tmp.FieldSplit(fieldsplit) + + tmp.ValueSplit(valuesplit) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_keyValueProcessor) Description(description string) *_keyValueProcessor { + + s.v.Description = &description + + return s +} + +// List of keys to exclude from document. +func (s *_keyValueProcessor) ExcludeKeys(excludekeys ...string) *_keyValueProcessor { + + for _, v := range excludekeys { + + s.v.ExcludeKeys = append(s.v.ExcludeKeys, v) + + } + return s +} + +// The field to be parsed. +// Supports template snippets. +func (s *_keyValueProcessor) Field(field string) *_keyValueProcessor { + + s.v.Field = field + + return s +} + +// Regex pattern to use for splitting key-value pairs. +func (s *_keyValueProcessor) FieldSplit(fieldsplit string) *_keyValueProcessor { + + s.v.FieldSplit = fieldsplit + + return s +} + +// Conditionally execute the processor. 
+func (s *_keyValueProcessor) If(if_ types.ScriptVariant) *_keyValueProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_keyValueProcessor) IgnoreFailure(ignorefailure bool) *_keyValueProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_keyValueProcessor) IgnoreMissing(ignoremissing bool) *_keyValueProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// List of keys to filter and insert into document. +// Defaults to including all keys. +func (s *_keyValueProcessor) IncludeKeys(includekeys ...string) *_keyValueProcessor { + + for _, v := range includekeys { + + s.v.IncludeKeys = append(s.v.IncludeKeys, v) + + } + return s +} + +// Handle failures for the processor. +func (s *_keyValueProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_keyValueProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Prefix to be added to extracted keys. +func (s *_keyValueProcessor) Prefix(prefix string) *_keyValueProcessor { + + s.v.Prefix = &prefix + + return s +} + +// If `true`. strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from +// extracted values. +func (s *_keyValueProcessor) StripBrackets(stripbrackets bool) *_keyValueProcessor { + + s.v.StripBrackets = &stripbrackets + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_keyValueProcessor) Tag(tag string) *_keyValueProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to insert the extracted keys into. +// Defaults to the root of the document. +// Supports template snippets. 
+func (s *_keyValueProcessor) TargetField(field string) *_keyValueProcessor { + + s.v.TargetField = &field + + return s +} + +// String of characters to trim from extracted keys. +func (s *_keyValueProcessor) TrimKey(trimkey string) *_keyValueProcessor { + + s.v.TrimKey = &trimkey + + return s +} + +// String of characters to trim from extracted values. +func (s *_keyValueProcessor) TrimValue(trimvalue string) *_keyValueProcessor { + + s.v.TrimValue = &trimvalue + + return s +} + +// Regex pattern to use for splitting the key from the value within a key-value +// pair. +func (s *_keyValueProcessor) ValueSplit(valuesplit string) *_keyValueProcessor { + + s.v.ValueSplit = valuesplit + + return s +} + +func (s *_keyValueProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Kv = s.v + + return container +} + +func (s *_keyValueProcessor) KeyValueProcessorCaster() *types.KeyValueProcessor { + return s.v +} diff --git a/typedapi/esdsl/keywordanalyzer.go b/typedapi/esdsl/keywordanalyzer.go new file mode 100644 index 0000000000..3f31c0ea1c --- /dev/null +++ b/typedapi/esdsl/keywordanalyzer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _keywordAnalyzer struct { + v *types.KeywordAnalyzer +} + +func NewKeywordAnalyzer() *_keywordAnalyzer { + + return &_keywordAnalyzer{v: types.NewKeywordAnalyzer()} + +} + +func (s *_keywordAnalyzer) Version(versionstring string) *_keywordAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_keywordAnalyzer) KeywordAnalyzerCaster() *types.KeywordAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/keywordmarkertokenfilter.go b/typedapi/esdsl/keywordmarkertokenfilter.go new file mode 100644 index 0000000000..d30eb7cf6a --- /dev/null +++ b/typedapi/esdsl/keywordmarkertokenfilter.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _keywordMarkerTokenFilter struct { + v *types.KeywordMarkerTokenFilter +} + +func NewKeywordMarkerTokenFilter() *_keywordMarkerTokenFilter { + + return &_keywordMarkerTokenFilter{v: types.NewKeywordMarkerTokenFilter()} + +} + +func (s *_keywordMarkerTokenFilter) IgnoreCase(ignorecase bool) *_keywordMarkerTokenFilter { + + s.v.IgnoreCase = &ignorecase + + return s +} + +func (s *_keywordMarkerTokenFilter) Keywords(keywords ...string) *_keywordMarkerTokenFilter { + + s.v.Keywords = make([]string, len(keywords)) + s.v.Keywords = keywords + + return s +} + +func (s *_keywordMarkerTokenFilter) KeywordsPath(keywordspath string) *_keywordMarkerTokenFilter { + + s.v.KeywordsPath = &keywordspath + + return s +} + +func (s *_keywordMarkerTokenFilter) KeywordsPattern(keywordspattern string) *_keywordMarkerTokenFilter { + + s.v.KeywordsPattern = &keywordspattern + + return s +} + +func (s *_keywordMarkerTokenFilter) Version(versionstring string) *_keywordMarkerTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_keywordMarkerTokenFilter) KeywordMarkerTokenFilterCaster() *types.KeywordMarkerTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/keywordproperty.go b/typedapi/esdsl/keywordproperty.go new file mode 100644 index 0000000000..8bbde9c240 --- /dev/null +++ b/typedapi/esdsl/keywordproperty.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _keywordProperty struct { + v *types.KeywordProperty +} + +func NewKeywordProperty() *_keywordProperty { + + return &_keywordProperty{v: types.NewKeywordProperty()} + +} + +func (s *_keywordProperty) Boost(boost types.Float64) *_keywordProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_keywordProperty) CopyTo(fields ...string) *_keywordProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_keywordProperty) DocValues(docvalues bool) *_keywordProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_keywordProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_keywordProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_keywordProperty) EagerGlobalOrdinals(eagerglobalordinals bool) *_keywordProperty { + + s.v.EagerGlobalOrdinals = &eagerglobalordinals + + return s +} + +func (s *_keywordProperty) Fields(fields map[string]types.Property) *_keywordProperty { + + s.v.Fields = fields + return s +} + +func (s 
*_keywordProperty) AddField(key string, value types.PropertyVariant) *_keywordProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_keywordProperty) IgnoreAbove(ignoreabove int) *_keywordProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_keywordProperty) Index(index bool) *_keywordProperty { + + s.v.Index = &index + + return s +} + +func (s *_keywordProperty) IndexOptions(indexoptions indexoptions.IndexOptions) *_keywordProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +// Metadata about the field. +func (s *_keywordProperty) Meta(meta map[string]string) *_keywordProperty { + + s.v.Meta = meta + return s +} + +func (s *_keywordProperty) AddMeta(key string, value string) *_keywordProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_keywordProperty) Normalizer(normalizer string) *_keywordProperty { + + s.v.Normalizer = &normalizer + + return s +} + +func (s *_keywordProperty) Norms(norms bool) *_keywordProperty { + + s.v.Norms = &norms + + return s +} + +func (s *_keywordProperty) NullValue(nullvalue string) *_keywordProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_keywordProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_keywordProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_keywordProperty) Properties(properties map[string]types.Property) *_keywordProperty { + + s.v.Properties = properties + return s +} + +func (s *_keywordProperty) AddProperty(key string, value types.PropertyVariant) *_keywordProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { 
+ tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_keywordProperty) Script(script types.ScriptVariant) *_keywordProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_keywordProperty) Similarity(similarity string) *_keywordProperty { + + s.v.Similarity = &similarity + + return s +} + +func (s *_keywordProperty) SplitQueriesOnWhitespace(splitqueriesonwhitespace bool) *_keywordProperty { + + s.v.SplitQueriesOnWhitespace = &splitqueriesonwhitespace + + return s +} + +func (s *_keywordProperty) Store(store bool) *_keywordProperty { + + s.v.Store = &store + + return s +} + +func (s *_keywordProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_keywordProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_keywordProperty) TimeSeriesDimension(timeseriesdimension bool) *_keywordProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +func (s *_keywordProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_keywordProperty) KeywordPropertyCaster() *types.KeywordProperty { + return s.v +} diff --git a/typedapi/esdsl/keywordtokenizer.go b/typedapi/esdsl/keywordtokenizer.go new file mode 100644 index 0000000000..c9b0ee9466 --- /dev/null +++ b/typedapi/esdsl/keywordtokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _keywordTokenizer struct { + v *types.KeywordTokenizer +} + +func NewKeywordTokenizer() *_keywordTokenizer { + + return &_keywordTokenizer{v: types.NewKeywordTokenizer()} + +} + +func (s *_keywordTokenizer) BufferSize(buffersize int) *_keywordTokenizer { + + s.v.BufferSize = &buffersize + + return s +} + +func (s *_keywordTokenizer) Version(versionstring string) *_keywordTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_keywordTokenizer) KeywordTokenizerCaster() *types.KeywordTokenizer { + return s.v +} diff --git a/typedapi/esdsl/knnquery.go b/typedapi/esdsl/knnquery.go new file mode 100644 index 0000000000..6f0c3cf5a2 --- /dev/null +++ b/typedapi/esdsl/knnquery.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _knnQuery struct { + v *types.KnnQuery +} + +// Finds the k nearest vectors to a query vector, as measured by a similarity +// metric. knn query finds nearest vectors through approximate search on indexed +// dense_vectors. +func NewKnnQuery() *_knnQuery { + + return &_knnQuery{v: types.NewKnnQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_knnQuery) Boost(boost float32) *_knnQuery { + + s.v.Boost = &boost + + return s +} + +// The name of the vector field to search against +func (s *_knnQuery) Field(field string) *_knnQuery { + + s.v.Field = field + + return s +} + +// Filters for the kNN search query +func (s *_knnQuery) Filter(filters ...types.QueryVariant) *_knnQuery { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// The final number of nearest neighbors to return as top hits +func (s *_knnQuery) K(k int) *_knnQuery { + + s.v.K = &k + + return s +} + +// The number of nearest neighbor candidates to consider per shard +func (s *_knnQuery) NumCandidates(numcandidates int) *_knnQuery { + + s.v.NumCandidates = &numcandidates + + return s +} + +func (s *_knnQuery) QueryName_(queryname_ string) *_knnQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// The query vector +func (s *_knnQuery) QueryVector(queryvectors ...float32) *_knnQuery { + + s.v.QueryVector = queryvectors + + return s +} + +// The query vector builder. You must provide a query_vector_builder or +// query_vector, but not both. 
+func (s *_knnQuery) QueryVectorBuilder(queryvectorbuilder types.QueryVectorBuilderVariant) *_knnQuery { + + s.v.QueryVectorBuilder = queryvectorbuilder.QueryVectorBuilderCaster() + + return s +} + +// Apply oversampling and rescoring to quantized vectors * +func (s *_knnQuery) RescoreVector(rescorevector types.RescoreVectorVariant) *_knnQuery { + + s.v.RescoreVector = rescorevector.RescoreVectorCaster() + + return s +} + +// The minimum similarity for a vector to be considered a match +func (s *_knnQuery) Similarity(similarity float32) *_knnQuery { + + s.v.Similarity = &similarity + + return s +} + +func (s *_knnQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Knn = s.v + + return container +} + +func (s *_knnQuery) KnnQueryCaster() *types.KnnQuery { + return s.v +} diff --git a/typedapi/esdsl/knnretriever.go b/typedapi/esdsl/knnretriever.go new file mode 100644 index 0000000000..e951d947b8 --- /dev/null +++ b/typedapi/esdsl/knnretriever.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _knnRetriever struct { + v *types.KnnRetriever +} + +// A retriever that replaces the functionality of a knn search. +func NewKnnRetriever(field string, k int, numcandidates int) *_knnRetriever { + + tmp := &_knnRetriever{v: types.NewKnnRetriever()} + + tmp.Field(field) + + tmp.K(k) + + tmp.NumCandidates(numcandidates) + + return tmp + +} + +// The name of the vector field to search against. +func (s *_knnRetriever) Field(field string) *_knnRetriever { + + s.v.Field = field + + return s +} + +// Query to filter the documents that can match. +func (s *_knnRetriever) Filter(filters ...types.QueryVariant) *_knnRetriever { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// Number of nearest neighbors to return as top hits. +func (s *_knnRetriever) K(k int) *_knnRetriever { + + s.v.K = k + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are not +// included in the top documents. +func (s *_knnRetriever) MinScore(minscore float32) *_knnRetriever { + + s.v.MinScore = &minscore + + return s +} + +// Number of nearest neighbor candidates to consider per shard. +func (s *_knnRetriever) NumCandidates(numcandidates int) *_knnRetriever { + + s.v.NumCandidates = numcandidates + + return s +} + +// Query vector. Must have the same number of dimensions as the vector field you +// are searching against. You must provide a query_vector_builder or +// query_vector, but not both. +func (s *_knnRetriever) QueryVector(queryvectors ...float32) *_knnRetriever { + + s.v.QueryVector = queryvectors + + return s +} + +// Defines a model to build a query vector. 
+func (s *_knnRetriever) QueryVectorBuilder(queryvectorbuilder types.QueryVectorBuilderVariant) *_knnRetriever { + + s.v.QueryVectorBuilder = queryvectorbuilder.QueryVectorBuilderCaster() + + return s +} + +// Apply oversampling and rescoring to quantized vectors * +func (s *_knnRetriever) RescoreVector(rescorevector types.RescoreVectorVariant) *_knnRetriever { + + s.v.RescoreVector = rescorevector.RescoreVectorCaster() + + return s +} + +// The minimum similarity required for a document to be considered a match. +func (s *_knnRetriever) Similarity(similarity float32) *_knnRetriever { + + s.v.Similarity = &similarity + + return s +} + +func (s *_knnRetriever) RetrieverContainerCaster() *types.RetrieverContainer { + container := types.NewRetrieverContainer() + + container.Knn = s.v + + return container +} + +func (s *_knnRetriever) KnnRetrieverCaster() *types.KnnRetriever { + return s.v +} diff --git a/typedapi/esdsl/knnsearch.go b/typedapi/esdsl/knnsearch.go new file mode 100644 index 0000000000..81cc51c1d7 --- /dev/null +++ b/typedapi/esdsl/knnsearch.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _knnSearch struct { + v *types.KnnSearch +} + +func NewKnnSearch() *_knnSearch { + + return &_knnSearch{v: types.NewKnnSearch()} + +} + +// Boost value to apply to kNN scores +func (s *_knnSearch) Boost(boost float32) *_knnSearch { + + s.v.Boost = &boost + + return s +} + +// The name of the vector field to search against +func (s *_knnSearch) Field(field string) *_knnSearch { + + s.v.Field = field + + return s +} + +// Filters for the kNN search query +func (s *_knnSearch) Filter(filters ...types.QueryVariant) *_knnSearch { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// If defined, each search hit will contain inner hits. +func (s *_knnSearch) InnerHits(innerhits types.InnerHitsVariant) *_knnSearch { + + s.v.InnerHits = innerhits.InnerHitsCaster() + + return s +} + +// The final number of nearest neighbors to return as top hits +func (s *_knnSearch) K(k int) *_knnSearch { + + s.v.K = &k + + return s +} + +// The number of nearest neighbor candidates to consider per shard +func (s *_knnSearch) NumCandidates(numcandidates int) *_knnSearch { + + s.v.NumCandidates = &numcandidates + + return s +} + +// The query vector +func (s *_knnSearch) QueryVector(queryvectors ...float32) *_knnSearch { + + s.v.QueryVector = queryvectors + + return s +} + +// The query vector builder. You must provide a query_vector_builder or +// query_vector, but not both. 
+func (s *_knnSearch) QueryVectorBuilder(queryvectorbuilder types.QueryVectorBuilderVariant) *_knnSearch { + + s.v.QueryVectorBuilder = queryvectorbuilder.QueryVectorBuilderCaster() + + return s +} + +// Apply oversampling and rescoring to quantized vectors * +func (s *_knnSearch) RescoreVector(rescorevector types.RescoreVectorVariant) *_knnSearch { + + s.v.RescoreVector = rescorevector.RescoreVectorCaster() + + return s +} + +// The minimum similarity for a vector to be considered a match +func (s *_knnSearch) Similarity(similarity float32) *_knnSearch { + + s.v.Similarity = &similarity + + return s +} + +func (s *_knnSearch) KnnSearchCaster() *types.KnnSearch { + return s.v +} diff --git a/typedapi/esdsl/kstemtokenfilter.go b/typedapi/esdsl/kstemtokenfilter.go new file mode 100644 index 0000000000..375d65949d --- /dev/null +++ b/typedapi/esdsl/kstemtokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _kStemTokenFilter struct { + v *types.KStemTokenFilter +} + +func NewKStemTokenFilter() *_kStemTokenFilter { + + return &_kStemTokenFilter{v: types.NewKStemTokenFilter()} + +} + +func (s *_kStemTokenFilter) Version(versionstring string) *_kStemTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_kStemTokenFilter) KStemTokenFilterCaster() *types.KStemTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/kuromojianalyzer.go b/typedapi/esdsl/kuromojianalyzer.go new file mode 100644 index 0000000000..49d3d73253 --- /dev/null +++ b/typedapi/esdsl/kuromojianalyzer.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode" +) + +type _kuromojiAnalyzer struct { + v *types.KuromojiAnalyzer +} + +func NewKuromojiAnalyzer(mode kuromojitokenizationmode.KuromojiTokenizationMode) *_kuromojiAnalyzer { + + tmp := &_kuromojiAnalyzer{v: types.NewKuromojiAnalyzer()} + + tmp.Mode(mode) + + return tmp + +} + +func (s *_kuromojiAnalyzer) Mode(mode kuromojitokenizationmode.KuromojiTokenizationMode) *_kuromojiAnalyzer { + + s.v.Mode = mode + return s +} + +func (s *_kuromojiAnalyzer) UserDictionary(userdictionary string) *_kuromojiAnalyzer { + + s.v.UserDictionary = &userdictionary + + return s +} + +func (s *_kuromojiAnalyzer) KuromojiAnalyzerCaster() *types.KuromojiAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/kuromojiiterationmarkcharfilter.go b/typedapi/esdsl/kuromojiiterationmarkcharfilter.go new file mode 100644 index 0000000000..75921f2ca4 --- /dev/null +++ b/typedapi/esdsl/kuromojiiterationmarkcharfilter.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _kuromojiIterationMarkCharFilter struct { + v *types.KuromojiIterationMarkCharFilter +} + +func NewKuromojiIterationMarkCharFilter(normalizekana bool, normalizekanji bool) *_kuromojiIterationMarkCharFilter { + + tmp := &_kuromojiIterationMarkCharFilter{v: types.NewKuromojiIterationMarkCharFilter()} + + tmp.NormalizeKana(normalizekana) + + tmp.NormalizeKanji(normalizekanji) + + return tmp + +} + +func (s *_kuromojiIterationMarkCharFilter) NormalizeKana(normalizekana bool) *_kuromojiIterationMarkCharFilter { + + s.v.NormalizeKana = normalizekana + + return s +} + +func (s *_kuromojiIterationMarkCharFilter) NormalizeKanji(normalizekanji bool) *_kuromojiIterationMarkCharFilter { + + s.v.NormalizeKanji = normalizekanji + + return s +} + +func (s *_kuromojiIterationMarkCharFilter) Version(versionstring string) *_kuromojiIterationMarkCharFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_kuromojiIterationMarkCharFilter) KuromojiIterationMarkCharFilterCaster() *types.KuromojiIterationMarkCharFilter { + return s.v +} diff --git a/typedapi/esdsl/kuromojipartofspeechtokenfilter.go b/typedapi/esdsl/kuromojipartofspeechtokenfilter.go new file mode 100644 index 0000000000..dc51885318 --- /dev/null +++ b/typedapi/esdsl/kuromojipartofspeechtokenfilter.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _kuromojiPartOfSpeechTokenFilter struct { + v *types.KuromojiPartOfSpeechTokenFilter +} + +func NewKuromojiPartOfSpeechTokenFilter() *_kuromojiPartOfSpeechTokenFilter { + + return &_kuromojiPartOfSpeechTokenFilter{v: types.NewKuromojiPartOfSpeechTokenFilter()} + +} + +func (s *_kuromojiPartOfSpeechTokenFilter) Stoptags(stoptags ...string) *_kuromojiPartOfSpeechTokenFilter { + + for _, v := range stoptags { + + s.v.Stoptags = append(s.v.Stoptags, v) + + } + return s +} + +func (s *_kuromojiPartOfSpeechTokenFilter) Version(versionstring string) *_kuromojiPartOfSpeechTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_kuromojiPartOfSpeechTokenFilter) KuromojiPartOfSpeechTokenFilterCaster() *types.KuromojiPartOfSpeechTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/kuromojireadingformtokenfilter.go b/typedapi/esdsl/kuromojireadingformtokenfilter.go new file mode 100644 index 0000000000..bd8b670461 --- /dev/null +++ b/typedapi/esdsl/kuromojireadingformtokenfilter.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _kuromojiReadingFormTokenFilter struct { + v *types.KuromojiReadingFormTokenFilter +} + +func NewKuromojiReadingFormTokenFilter(useromaji bool) *_kuromojiReadingFormTokenFilter { + + tmp := &_kuromojiReadingFormTokenFilter{v: types.NewKuromojiReadingFormTokenFilter()} + + tmp.UseRomaji(useromaji) + + return tmp + +} + +func (s *_kuromojiReadingFormTokenFilter) UseRomaji(useromaji bool) *_kuromojiReadingFormTokenFilter { + + s.v.UseRomaji = useromaji + + return s +} + +func (s *_kuromojiReadingFormTokenFilter) Version(versionstring string) *_kuromojiReadingFormTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_kuromojiReadingFormTokenFilter) KuromojiReadingFormTokenFilterCaster() *types.KuromojiReadingFormTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/kuromojistemmertokenfilter.go b/typedapi/esdsl/kuromojistemmertokenfilter.go new file mode 100644 index 0000000000..e127c8ab23 --- /dev/null +++ b/typedapi/esdsl/kuromojistemmertokenfilter.go @@ -0,0 +1,55 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _kuromojiStemmerTokenFilter struct { + v *types.KuromojiStemmerTokenFilter +} + +func NewKuromojiStemmerTokenFilter(minimumlength int) *_kuromojiStemmerTokenFilter { + + tmp := &_kuromojiStemmerTokenFilter{v: types.NewKuromojiStemmerTokenFilter()} + + tmp.MinimumLength(minimumlength) + + return tmp + +} + +func (s *_kuromojiStemmerTokenFilter) MinimumLength(minimumlength int) *_kuromojiStemmerTokenFilter { + + s.v.MinimumLength = minimumlength + + return s +} + +func (s *_kuromojiStemmerTokenFilter) Version(versionstring string) *_kuromojiStemmerTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_kuromojiStemmerTokenFilter) KuromojiStemmerTokenFilterCaster() *types.KuromojiStemmerTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/kuromojitokenizer.go b/typedapi/esdsl/kuromojitokenizer.go new file mode 100644 index 0000000000..9b49ba74b4 --- /dev/null +++ 
b/typedapi/esdsl/kuromojitokenizer.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode" +) + +type _kuromojiTokenizer struct { + v *types.KuromojiTokenizer +} + +func NewKuromojiTokenizer(mode kuromojitokenizationmode.KuromojiTokenizationMode) *_kuromojiTokenizer { + + tmp := &_kuromojiTokenizer{v: types.NewKuromojiTokenizer()} + + tmp.Mode(mode) + + return tmp + +} + +func (s *_kuromojiTokenizer) DiscardCompoundToken(discardcompoundtoken bool) *_kuromojiTokenizer { + + s.v.DiscardCompoundToken = &discardcompoundtoken + + return s +} + +func (s *_kuromojiTokenizer) DiscardPunctuation(discardpunctuation bool) *_kuromojiTokenizer { + + s.v.DiscardPunctuation = &discardpunctuation + + return s +} + +func (s *_kuromojiTokenizer) Mode(mode kuromojitokenizationmode.KuromojiTokenizationMode) *_kuromojiTokenizer { + + s.v.Mode = mode + return s +} + +func (s 
*_kuromojiTokenizer) NbestCost(nbestcost int) *_kuromojiTokenizer { + + s.v.NbestCost = &nbestcost + + return s +} + +func (s *_kuromojiTokenizer) NbestExamples(nbestexamples string) *_kuromojiTokenizer { + + s.v.NbestExamples = &nbestexamples + + return s +} + +func (s *_kuromojiTokenizer) UserDictionary(userdictionary string) *_kuromojiTokenizer { + + s.v.UserDictionary = &userdictionary + + return s +} + +func (s *_kuromojiTokenizer) UserDictionaryRules(userdictionaryrules ...string) *_kuromojiTokenizer { + + for _, v := range userdictionaryrules { + + s.v.UserDictionaryRules = append(s.v.UserDictionaryRules, v) + + } + return s +} + +func (s *_kuromojiTokenizer) Version(versionstring string) *_kuromojiTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_kuromojiTokenizer) KuromojiTokenizerCaster() *types.KuromojiTokenizer { + return s.v +} diff --git a/typedapi/esdsl/laplacesmoothingmodel.go b/typedapi/esdsl/laplacesmoothingmodel.go new file mode 100644 index 0000000000..aa519660f5 --- /dev/null +++ b/typedapi/esdsl/laplacesmoothingmodel.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _laplaceSmoothingModel struct { + v *types.LaplaceSmoothingModel +} + +// A smoothing model that uses an additive smoothing where a constant (typically +// `1.0` or smaller) is added to all counts to balance weights. +func NewLaplaceSmoothingModel(alpha types.Float64) *_laplaceSmoothingModel { + + tmp := &_laplaceSmoothingModel{v: types.NewLaplaceSmoothingModel()} + + tmp.Alpha(alpha) + + return tmp + +} + +// A constant that is added to all counts to balance weights. +func (s *_laplaceSmoothingModel) Alpha(alpha types.Float64) *_laplaceSmoothingModel { + + s.v.Alpha = alpha + + return s +} + +func (s *_laplaceSmoothingModel) SmoothingModelContainerCaster() *types.SmoothingModelContainer { + container := types.NewSmoothingModelContainer() + + container.Laplace = s.v + + return container +} + +func (s *_laplaceSmoothingModel) LaplaceSmoothingModelCaster() *types.LaplaceSmoothingModel { + return s.v +} diff --git a/typedapi/esdsl/latest.go b/typedapi/esdsl/latest.go new file mode 100644 index 0000000000..48414eebb4 --- /dev/null +++ b/typedapi/esdsl/latest.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _latest struct { + v *types.Latest +} + +func NewLatest() *_latest { + + return &_latest{v: types.NewLatest()} + +} + +// Specifies the date field that is used to identify the latest documents. +func (s *_latest) Sort(field string) *_latest { + + s.v.Sort = field + + return s +} + +// Specifies an array of one or more fields that are used to group the data. +func (s *_latest) UniqueKey(uniquekeys ...string) *_latest { + + for _, v := range uniquekeys { + + s.v.UniqueKey = append(s.v.UniqueKey, v) + + } + return s +} + +func (s *_latest) LatestCaster() *types.Latest { + return s.v +} diff --git a/typedapi/esdsl/latlongeolocation.go b/typedapi/esdsl/latlongeolocation.go new file mode 100644 index 0000000000..8c72164822 --- /dev/null +++ b/typedapi/esdsl/latlongeolocation.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _latLonGeoLocation struct { + v *types.LatLonGeoLocation +} + +func NewLatLonGeoLocation(lat types.Float64, lon types.Float64) *_latLonGeoLocation { + + tmp := &_latLonGeoLocation{v: types.NewLatLonGeoLocation()} + + tmp.Lat(lat) + + tmp.Lon(lon) + + return tmp + +} + +// Latitude +func (s *_latLonGeoLocation) Lat(lat types.Float64) *_latLonGeoLocation { + + s.v.Lat = lat + + return s +} + +// Longitude +func (s *_latLonGeoLocation) Lon(lon types.Float64) *_latLonGeoLocation { + + s.v.Lon = lon + + return s +} + +func (s *_latLonGeoLocation) LatLonGeoLocationCaster() *types.LatLonGeoLocation { + return s.v +} diff --git a/typedapi/esdsl/latviananalyzer.go b/typedapi/esdsl/latviananalyzer.go new file mode 100644 index 0000000000..69a1e15349 --- /dev/null +++ b/typedapi/esdsl/latviananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _latvianAnalyzer struct { + v *types.LatvianAnalyzer +} + +func NewLatvianAnalyzer() *_latvianAnalyzer { + + return &_latvianAnalyzer{v: types.NewLatvianAnalyzer()} + +} + +func (s *_latvianAnalyzer) StemExclusion(stemexclusions ...string) *_latvianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_latvianAnalyzer) Stopwords(stopwords ...string) *_latvianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_latvianAnalyzer) StopwordsPath(stopwordspath string) *_latvianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_latvianAnalyzer) LatvianAnalyzerCaster() *types.LatvianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/learningtorank.go b/typedapi/esdsl/learningtorank.go new file mode 100644 index 0000000000..b2ea33cea3 --- /dev/null +++ b/typedapi/esdsl/learningtorank.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _learningToRank struct { + v *types.LearningToRank +} + +func NewLearningToRank(modelid string) *_learningToRank { + + tmp := &_learningToRank{v: types.NewLearningToRank()} + + tmp.ModelId(modelid) + + return tmp + +} + +// The unique identifier of the trained model uploaded to Elasticsearch +func (s *_learningToRank) ModelId(modelid string) *_learningToRank { + + s.v.ModelId = modelid + + return s +} + +// Named parameters to be passed to the query templates used for feature +func (s *_learningToRank) Params(params map[string]json.RawMessage) *_learningToRank { + + s.v.Params = params + return s +} + +func (s *_learningToRank) AddParam(key string, value json.RawMessage) *_learningToRank { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_learningToRank) RescoreCaster() *types.Rescore { + container := types.NewRescore() + + container.LearningToRank = s.v + + return container +} + +func (s *_learningToRank) LearningToRankCaster() *types.LearningToRank { + return s.v +} diff --git a/typedapi/esdsl/lengthtokenfilter.go b/typedapi/esdsl/lengthtokenfilter.go new file mode 100644 index 0000000000..951f3fdff1 --- /dev/null +++ b/typedapi/esdsl/lengthtokenfilter.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lengthTokenFilter struct { + v *types.LengthTokenFilter +} + +func NewLengthTokenFilter() *_lengthTokenFilter { + + return &_lengthTokenFilter{v: types.NewLengthTokenFilter()} + +} + +func (s *_lengthTokenFilter) Max(max int) *_lengthTokenFilter { + + s.v.Max = &max + + return s +} + +func (s *_lengthTokenFilter) Min(min int) *_lengthTokenFilter { + + s.v.Min = &min + + return s +} + +func (s *_lengthTokenFilter) Version(versionstring string) *_lengthTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_lengthTokenFilter) LengthTokenFilterCaster() *types.LengthTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/lessthanvalidation.go b/typedapi/esdsl/lessthanvalidation.go new file mode 100644 index 0000000000..e226135cd1 --- /dev/null +++ b/typedapi/esdsl/lessthanvalidation.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lessThanValidation struct { + v *types.LessThanValidation +} + +func NewLessThanValidation(constraint types.Float64) *_lessThanValidation { + + tmp := &_lessThanValidation{v: types.NewLessThanValidation()} + + tmp.Constraint(constraint) + + return tmp + +} + +func (s *_lessThanValidation) Constraint(constraint types.Float64) *_lessThanValidation { + + s.v.Constraint = constraint + + return s +} + +func (s *_lessThanValidation) LessThanValidationCaster() *types.LessThanValidation { + return s.v +} diff --git a/typedapi/esdsl/lettertokenizer.go b/typedapi/esdsl/lettertokenizer.go new file mode 100644 index 0000000000..a323173c19 --- /dev/null +++ b/typedapi/esdsl/lettertokenizer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _letterTokenizer struct { + v *types.LetterTokenizer +} + +func NewLetterTokenizer() *_letterTokenizer { + + return &_letterTokenizer{v: types.NewLetterTokenizer()} + +} + +func (s *_letterTokenizer) Version(versionstring string) *_letterTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_letterTokenizer) LetterTokenizerCaster() *types.LetterTokenizer { + return s.v +} diff --git a/typedapi/esdsl/license.go b/typedapi/esdsl/license.go new file mode 100644 index 0000000000..8a57db8799 --- /dev/null +++ b/typedapi/esdsl/license.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype" +) + +type _license struct { + v *types.License +} + +func NewLicense(issuedto string, issuer string, signature string, type_ licensetype.LicenseType, uid string) *_license { + + tmp := &_license{v: types.NewLicense()} + + tmp.IssuedTo(issuedto) + + tmp.Issuer(issuer) + + tmp.Signature(signature) + + tmp.Type(type_) + + tmp.Uid(uid) + + return tmp + +} + +func (s *_license) ExpiryDateInMillis(epochtimeunitmillis int64) *_license { + + s.v.ExpiryDateInMillis = epochtimeunitmillis + + return s +} + +func (s *_license) IssueDateInMillis(epochtimeunitmillis int64) *_license { + + s.v.IssueDateInMillis = epochtimeunitmillis + + return s +} + +func (s *_license) IssuedTo(issuedto string) *_license { + + s.v.IssuedTo = issuedto + + return s +} + +func (s *_license) Issuer(issuer string) *_license { + + s.v.Issuer = issuer + + return s +} + +func (s *_license) MaxNodes(maxnodes int64) *_license { + + s.v.MaxNodes = &maxnodes + + return s +} + +func (s *_license) MaxResourceUnits(maxresourceunits int64) *_license { + + s.v.MaxResourceUnits = &maxresourceunits + + return s +} + +func (s *_license) Signature(signature string) *_license { + + s.v.Signature = signature + + return s +} + +func (s *_license) StartDateInMillis(epochtimeunitmillis int64) *_license { + + s.v.StartDateInMillis = &epochtimeunitmillis + + return s +} + +func (s *_license) Type(type_ licensetype.LicenseType) *_license { + + s.v.Type = type_ + return s +} + +func (s *_license) Uid(uid string) *_license { + + s.v.Uid = uid + + return s +} + +func (s *_license) 
LicenseCaster() *types.License { + return s.v +} diff --git a/typedapi/esdsl/like.go b/typedapi/esdsl/like.go new file mode 100644 index 0000000000..4b1e7314a6 --- /dev/null +++ b/typedapi/esdsl/like.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _like struct { + v types.Like +} + +func NewLike() *_like { + return &_like{v: nil} +} + +func (u *_like) String(string string) *_like { + + u.v = &string + + return u +} + +func (u *_like) LikeDocument(likedocument types.LikeDocumentVariant) *_like { + + u.v = &likedocument + + return u +} + +// Interface implementation for LikeDocument in Like union +func (u *_likeDocument) LikeCaster() *types.Like { + t := types.Like(u.v) + return &t +} + +func (u *_like) LikeCaster() *types.Like { + return &u.v +} diff --git a/typedapi/esdsl/likedocument.go b/typedapi/esdsl/likedocument.go new file mode 100644 index 0000000000..ea62af5003 --- /dev/null +++ b/typedapi/esdsl/likedocument.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _likeDocument struct { + v *types.LikeDocument +} + +func NewLikeDocument() *_likeDocument { + + return &_likeDocument{v: types.NewLikeDocument()} + +} + +// A document not present in the index. +func (s *_likeDocument) Doc(doc json.RawMessage) *_likeDocument { + + s.v.Doc = doc + + return s +} + +func (s *_likeDocument) Fields(fields ...string) *_likeDocument { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, v) + + } + return s +} + +// ID of a document. +func (s *_likeDocument) Id_(id string) *_likeDocument { + + s.v.Id_ = &id + + return s +} + +// Index of a document. +func (s *_likeDocument) Index_(indexname string) *_likeDocument { + + s.v.Index_ = &indexname + + return s +} + +// Overrides the default analyzer. 
+func (s *_likeDocument) PerFieldAnalyzer(perfieldanalyzer map[string]string) *_likeDocument { + + s.v.PerFieldAnalyzer = perfieldanalyzer + return s +} + +func (s *_likeDocument) AddPerFieldAnalyzer(key string, value string) *_likeDocument { + + var tmp map[string]string + if s.v.PerFieldAnalyzer == nil { + s.v.PerFieldAnalyzer = make(map[string]string) + } else { + tmp = s.v.PerFieldAnalyzer + } + + tmp[key] = value + + s.v.PerFieldAnalyzer = tmp + return s +} + +func (s *_likeDocument) Routing(routing string) *_likeDocument { + + s.v.Routing = &routing + + return s +} + +func (s *_likeDocument) Version(versionnumber int64) *_likeDocument { + + s.v.Version = &versionnumber + + return s +} + +func (s *_likeDocument) VersionType(versiontype versiontype.VersionType) *_likeDocument { + + s.v.VersionType = &versiontype + return s +} + +func (s *_likeDocument) LikeDocumentCaster() *types.LikeDocument { + return s.v +} diff --git a/typedapi/esdsl/limittokencounttokenfilter.go b/typedapi/esdsl/limittokencounttokenfilter.go new file mode 100644 index 0000000000..96f4d7f490 --- /dev/null +++ b/typedapi/esdsl/limittokencounttokenfilter.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _limitTokenCountTokenFilter struct { + v *types.LimitTokenCountTokenFilter +} + +func NewLimitTokenCountTokenFilter() *_limitTokenCountTokenFilter { + + return &_limitTokenCountTokenFilter{v: types.NewLimitTokenCountTokenFilter()} + +} + +func (s *_limitTokenCountTokenFilter) ConsumeAllTokens(consumealltokens bool) *_limitTokenCountTokenFilter { + + s.v.ConsumeAllTokens = &consumealltokens + + return s +} + +func (s *_limitTokenCountTokenFilter) MaxTokenCount(stringifiedinteger types.StringifiedintegerVariant) *_limitTokenCountTokenFilter { + + s.v.MaxTokenCount = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_limitTokenCountTokenFilter) Version(versionstring string) *_limitTokenCountTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_limitTokenCountTokenFilter) LimitTokenCountTokenFilterCaster() *types.LimitTokenCountTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/linearinterpolationsmoothingmodel.go b/typedapi/esdsl/linearinterpolationsmoothingmodel.go new file mode 100644 index 0000000000..6600dbeeb6 --- /dev/null +++ b/typedapi/esdsl/linearinterpolationsmoothingmodel.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _linearInterpolationSmoothingModel struct { + v *types.LinearInterpolationSmoothingModel +} + +// A smoothing model that takes the weighted mean of the unigrams, bigrams, and +// trigrams based on user supplied weights (lambdas). +func NewLinearInterpolationSmoothingModel(bigramlambda types.Float64, trigramlambda types.Float64, unigramlambda types.Float64) *_linearInterpolationSmoothingModel { + + tmp := &_linearInterpolationSmoothingModel{v: types.NewLinearInterpolationSmoothingModel()} + + tmp.BigramLambda(bigramlambda) + + tmp.TrigramLambda(trigramlambda) + + tmp.UnigramLambda(unigramlambda) + + return tmp + +} + +func (s *_linearInterpolationSmoothingModel) BigramLambda(bigramlambda types.Float64) *_linearInterpolationSmoothingModel { + + s.v.BigramLambda = bigramlambda + + return s +} + +func (s *_linearInterpolationSmoothingModel) TrigramLambda(trigramlambda types.Float64) *_linearInterpolationSmoothingModel { + + s.v.TrigramLambda = trigramlambda + + return s +} + +func (s *_linearInterpolationSmoothingModel) UnigramLambda(unigramlambda types.Float64) *_linearInterpolationSmoothingModel { + + s.v.UnigramLambda = unigramlambda + + return s +} + +func (s *_linearInterpolationSmoothingModel) SmoothingModelContainerCaster() *types.SmoothingModelContainer { + container := 
types.NewSmoothingModelContainer() + + container.LinearInterpolation = s.v + + return container +} + +func (s *_linearInterpolationSmoothingModel) LinearInterpolationSmoothingModelCaster() *types.LinearInterpolationSmoothingModel { + return s.v +} diff --git a/typedapi/esdsl/linearmovingaverageaggregation.go b/typedapi/esdsl/linearmovingaverageaggregation.go new file mode 100644 index 0000000000..c3dcd17b9e --- /dev/null +++ b/typedapi/esdsl/linearmovingaverageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _linearMovingAverageAggregation struct { + v *types.LinearMovingAverageAggregation +} + +func NewLinearMovingAverageAggregation(settings types.EmptyObjectVariant) *_linearMovingAverageAggregation { + + tmp := &_linearMovingAverageAggregation{v: types.NewLinearMovingAverageAggregation()} + + tmp.Settings(settings) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_linearMovingAverageAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_linearMovingAverageAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_linearMovingAverageAggregation) Format(format string) *_linearMovingAverageAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_linearMovingAverageAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_linearMovingAverageAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_linearMovingAverageAggregation) Minimize(minimize bool) *_linearMovingAverageAggregation { + + s.v.Minimize = &minimize + + return s +} + +func (s *_linearMovingAverageAggregation) Predict(predict int) *_linearMovingAverageAggregation { + + s.v.Predict = &predict + + return s +} + +func (s *_linearMovingAverageAggregation) Settings(settings types.EmptyObjectVariant) *_linearMovingAverageAggregation { + + s.v.Settings = *settings.EmptyObjectCaster() + + return s +} + +func (s *_linearMovingAverageAggregation) Window(window int) *_linearMovingAverageAggregation { + + s.v.Window = &window + + return s +} + +func (s *_linearMovingAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingAvg = s.v + + return container +} + +func (s *_linearMovingAverageAggregation) LinearMovingAverageAggregationCaster() *types.LinearMovingAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/listtypevalidation.go b/typedapi/esdsl/listtypevalidation.go new file mode 100644 index 0000000000..bb254f0e67 --- /dev/null +++ b/typedapi/esdsl/listtypevalidation.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _listTypeValidation struct { + v *types.ListTypeValidation +} + +func NewListTypeValidation(constraint string) *_listTypeValidation { + + tmp := &_listTypeValidation{v: types.NewListTypeValidation()} + + tmp.Constraint(constraint) + + return tmp + +} + +func (s *_listTypeValidation) Constraint(constraint string) *_listTypeValidation { + + s.v.Constraint = constraint + + return s +} + +func (s *_listTypeValidation) ListTypeValidationCaster() *types.ListTypeValidation { + return s.v +} diff --git a/typedapi/esdsl/lithuaniananalyzer.go b/typedapi/esdsl/lithuaniananalyzer.go new file mode 100644 index 0000000000..a34a1d8f43 --- /dev/null +++ b/typedapi/esdsl/lithuaniananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lithuanianAnalyzer struct { + v *types.LithuanianAnalyzer +} + +func NewLithuanianAnalyzer() *_lithuanianAnalyzer { + + return &_lithuanianAnalyzer{v: types.NewLithuanianAnalyzer()} + +} + +func (s *_lithuanianAnalyzer) StemExclusion(stemexclusions ...string) *_lithuanianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_lithuanianAnalyzer) Stopwords(stopwords ...string) *_lithuanianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_lithuanianAnalyzer) StopwordsPath(stopwordspath string) *_lithuanianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_lithuanianAnalyzer) LithuanianAnalyzerCaster() *types.LithuanianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/loggingaction.go b/typedapi/esdsl/loggingaction.go new file mode 100644 index 0000000000..ae90155c1c --- /dev/null +++ b/typedapi/esdsl/loggingaction.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _loggingAction struct { + v *types.LoggingAction +} + +func NewLoggingAction(text string) *_loggingAction { + + tmp := &_loggingAction{v: types.NewLoggingAction()} + + tmp.Text(text) + + return tmp + +} + +func (s *_loggingAction) Category(category string) *_loggingAction { + + s.v.Category = &category + + return s +} + +func (s *_loggingAction) Level(level string) *_loggingAction { + + s.v.Level = &level + + return s +} + +func (s *_loggingAction) Text(text string) *_loggingAction { + + s.v.Text = text + + return s +} + +func (s *_loggingAction) LoggingActionCaster() *types.LoggingAction { + return s.v +} diff --git a/typedapi/esdsl/logstashpipeline.go b/typedapi/esdsl/logstashpipeline.go new file mode 100644 index 0000000000..f4330650a0 --- /dev/null +++ b/typedapi/esdsl/logstashpipeline.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _logstashPipeline struct { + v *types.LogstashPipeline +} + +func NewLogstashPipeline(description string, pipeline string, pipelinemetadata types.PipelineMetadataVariant, pipelinesettings types.PipelineSettingsVariant, username string) *_logstashPipeline { + + tmp := &_logstashPipeline{v: types.NewLogstashPipeline()} + + tmp.Description(description) + + tmp.Pipeline(pipeline) + + tmp.PipelineMetadata(pipelinemetadata) + + tmp.PipelineSettings(pipelinesettings) + + tmp.Username(username) + + return tmp + +} + +// A description of the pipeline. +// This description is not used by Elasticsearch or Logstash. +func (s *_logstashPipeline) Description(description string) *_logstashPipeline { + + s.v.Description = description + + return s +} + +// The date the pipeline was last updated. +// It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. +func (s *_logstashPipeline) LastModified(datetime types.DateTimeVariant) *_logstashPipeline { + + s.v.LastModified = *datetime.DateTimeCaster() + + return s +} + +// The configuration for the pipeline. 
+func (s *_logstashPipeline) Pipeline(pipeline string) *_logstashPipeline { + + s.v.Pipeline = pipeline + + return s +} + +// Optional metadata about the pipeline, which can have any contents. +// This metadata is not generated or used by Elasticsearch or Logstash. +func (s *_logstashPipeline) PipelineMetadata(pipelinemetadata types.PipelineMetadataVariant) *_logstashPipeline { + + s.v.PipelineMetadata = *pipelinemetadata.PipelineMetadataCaster() + + return s +} + +// Settings for the pipeline. +// It supports only flat keys in dot notation. +func (s *_logstashPipeline) PipelineSettings(pipelinesettings types.PipelineSettingsVariant) *_logstashPipeline { + + s.v.PipelineSettings = *pipelinesettings.PipelineSettingsCaster() + + return s +} + +// The user who last updated the pipeline. +func (s *_logstashPipeline) Username(username string) *_logstashPipeline { + + s.v.Username = username + + return s +} + +func (s *_logstashPipeline) LogstashPipelineCaster() *types.LogstashPipeline { + return s.v +} diff --git a/typedapi/esdsl/longnumberproperty.go b/typedapi/esdsl/longnumberproperty.go new file mode 100644 index 0000000000..4aba7df82f --- /dev/null +++ b/typedapi/esdsl/longnumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _longNumberProperty struct { + v *types.LongNumberProperty +} + +func NewLongNumberProperty() *_longNumberProperty { + + return &_longNumberProperty{v: types.NewLongNumberProperty()} + +} + +func (s *_longNumberProperty) Boost(boost types.Float64) *_longNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_longNumberProperty) Coerce(coerce bool) *_longNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_longNumberProperty) CopyTo(fields ...string) *_longNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_longNumberProperty) DocValues(docvalues bool) *_longNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_longNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_longNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_longNumberProperty) Fields(fields map[string]types.Property) *_longNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_longNumberProperty) AddField(key string, value types.PropertyVariant) *_longNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func 
(s *_longNumberProperty) IgnoreAbove(ignoreabove int) *_longNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_longNumberProperty) IgnoreMalformed(ignoremalformed bool) *_longNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_longNumberProperty) Index(index bool) *_longNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_longNumberProperty) Meta(meta map[string]string) *_longNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_longNumberProperty) AddMeta(key string, value string) *_longNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_longNumberProperty) NullValue(nullvalue int64) *_longNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_longNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_longNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_longNumberProperty) Properties(properties map[string]types.Property) *_longNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_longNumberProperty) AddProperty(key string, value types.PropertyVariant) *_longNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_longNumberProperty) Script(script types.ScriptVariant) *_longNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_longNumberProperty) Store(store bool) *_longNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_longNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) 
*_longNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_longNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_longNumberProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_longNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_longNumberProperty { + + s.v.TimeSeriesMetric = ×eriesmetric + return s +} + +func (s *_longNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_longNumberProperty) LongNumberPropertyCaster() *types.LongNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/longrangeproperty.go b/typedapi/esdsl/longrangeproperty.go new file mode 100644 index 0000000000..9168a98caf --- /dev/null +++ b/typedapi/esdsl/longrangeproperty.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _longRangeProperty struct { + v *types.LongRangeProperty +} + +func NewLongRangeProperty() *_longRangeProperty { + + return &_longRangeProperty{v: types.NewLongRangeProperty()} + +} + +func (s *_longRangeProperty) Boost(boost types.Float64) *_longRangeProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_longRangeProperty) Coerce(coerce bool) *_longRangeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_longRangeProperty) CopyTo(fields ...string) *_longRangeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_longRangeProperty) DocValues(docvalues bool) *_longRangeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_longRangeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_longRangeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_longRangeProperty) Fields(fields map[string]types.Property) *_longRangeProperty { + + s.v.Fields = fields + return s +} + +func (s *_longRangeProperty) AddField(key string, value types.PropertyVariant) *_longRangeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_longRangeProperty) IgnoreAbove(ignoreabove int) *_longRangeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_longRangeProperty) Index(index bool) *_longRangeProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_longRangeProperty) Meta(meta map[string]string) *_longRangeProperty { + + s.v.Meta = meta + return s +} + +func (s *_longRangeProperty) AddMeta(key string, value string) *_longRangeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_longRangeProperty) Properties(properties map[string]types.Property) *_longRangeProperty { + + s.v.Properties = properties + return s +} + +func (s *_longRangeProperty) AddProperty(key string, value types.PropertyVariant) *_longRangeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_longRangeProperty) Store(store bool) *_longRangeProperty { + + s.v.Store = &store + + return s +} + +func (s *_longRangeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_longRangeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_longRangeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_longRangeProperty) LongRangePropertyCaster() *types.LongRangeProperty { + return s.v +} diff --git a/typedapi/esdsl/lowercasenormalizer.go b/typedapi/esdsl/lowercasenormalizer.go new file mode 100644 index 0000000000..d8bbe664d9 --- /dev/null +++ b/typedapi/esdsl/lowercasenormalizer.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lowercaseNormalizer struct { + v *types.LowercaseNormalizer +} + +func NewLowercaseNormalizer() *_lowercaseNormalizer { + + return &_lowercaseNormalizer{v: types.NewLowercaseNormalizer()} + +} + +func (s *_lowercaseNormalizer) LowercaseNormalizerCaster() *types.LowercaseNormalizer { + return s.v +} diff --git a/typedapi/esdsl/lowercaseprocessor.go b/typedapi/esdsl/lowercaseprocessor.go new file mode 100644 index 0000000000..de45e7d9f9 --- /dev/null +++ b/typedapi/esdsl/lowercaseprocessor.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lowercaseProcessor struct { + v *types.LowercaseProcessor +} + +// Converts a string to its lowercase equivalent. +// If the field is an array of strings, all members of the array will be +// converted. +func NewLowercaseProcessor() *_lowercaseProcessor { + + return &_lowercaseProcessor{v: types.NewLowercaseProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_lowercaseProcessor) Description(description string) *_lowercaseProcessor { + + s.v.Description = &description + + return s +} + +// The field to make lowercase. +func (s *_lowercaseProcessor) Field(field string) *_lowercaseProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_lowercaseProcessor) If(if_ types.ScriptVariant) *_lowercaseProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_lowercaseProcessor) IgnoreFailure(ignorefailure bool) *_lowercaseProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. 
+func (s *_lowercaseProcessor) IgnoreMissing(ignoremissing bool) *_lowercaseProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_lowercaseProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_lowercaseProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_lowercaseProcessor) Tag(tag string) *_lowercaseProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to. +// By default, the field is updated in-place. +func (s *_lowercaseProcessor) TargetField(field string) *_lowercaseProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_lowercaseProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Lowercase = s.v + + return container +} + +func (s *_lowercaseProcessor) LowercaseProcessorCaster() *types.LowercaseProcessor { + return s.v +} diff --git a/typedapi/esdsl/lowercasetokenfilter.go b/typedapi/esdsl/lowercasetokenfilter.go new file mode 100644 index 0000000000..24f17336a4 --- /dev/null +++ b/typedapi/esdsl/lowercasetokenfilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lowercaseTokenFilter struct { + v *types.LowercaseTokenFilter +} + +func NewLowercaseTokenFilter() *_lowercaseTokenFilter { + + return &_lowercaseTokenFilter{v: types.NewLowercaseTokenFilter()} + +} + +func (s *_lowercaseTokenFilter) Language(language string) *_lowercaseTokenFilter { + + s.v.Language = &language + + return s +} + +func (s *_lowercaseTokenFilter) Version(versionstring string) *_lowercaseTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_lowercaseTokenFilter) LowercaseTokenFilterCaster() *types.LowercaseTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/lowercasetokenizer.go b/typedapi/esdsl/lowercasetokenizer.go new file mode 100644 index 0000000000..c89b972404 --- /dev/null +++ b/typedapi/esdsl/lowercasetokenizer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _lowercaseTokenizer struct { + v *types.LowercaseTokenizer +} + +func NewLowercaseTokenizer() *_lowercaseTokenizer { + + return &_lowercaseTokenizer{v: types.NewLowercaseTokenizer()} + +} + +func (s *_lowercaseTokenizer) Version(versionstring string) *_lowercaseTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_lowercaseTokenizer) LowercaseTokenizerCaster() *types.LowercaseTokenizer { + return s.v +} diff --git a/typedapi/esdsl/manageuserprivileges.go b/typedapi/esdsl/manageuserprivileges.go new file mode 100644 index 0000000000..176bd75d72 --- /dev/null +++ b/typedapi/esdsl/manageuserprivileges.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _manageUserPrivileges struct { + v *types.ManageUserPrivileges +} + +func NewManageUserPrivileges() *_manageUserPrivileges { + + return &_manageUserPrivileges{v: types.NewManageUserPrivileges()} + +} + +func (s *_manageUserPrivileges) Applications(applications ...string) *_manageUserPrivileges { + + for _, v := range applications { + + s.v.Applications = append(s.v.Applications, v) + + } + return s +} + +func (s *_manageUserPrivileges) ManageUserPrivilegesCaster() *types.ManageUserPrivileges { + return s.v +} diff --git a/typedapi/esdsl/mappingcharfilter.go b/typedapi/esdsl/mappingcharfilter.go new file mode 100644 index 0000000000..4b185c8b7f --- /dev/null +++ b/typedapi/esdsl/mappingcharfilter.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingCharFilter struct { + v *types.MappingCharFilter +} + +func NewMappingCharFilter() *_mappingCharFilter { + + return &_mappingCharFilter{v: types.NewMappingCharFilter()} + +} + +func (s *_mappingCharFilter) Mappings(mappings ...string) *_mappingCharFilter { + + for _, v := range mappings { + + s.v.Mappings = append(s.v.Mappings, v) + + } + return s +} + +func (s *_mappingCharFilter) MappingsPath(mappingspath string) *_mappingCharFilter { + + s.v.MappingsPath = &mappingspath + + return s +} + +func (s *_mappingCharFilter) Version(versionstring string) *_mappingCharFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_mappingCharFilter) MappingCharFilterCaster() *types.MappingCharFilter { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettings.go b/typedapi/esdsl/mappinglimitsettings.go new file mode 100644 index 0000000000..033460f7d8 --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettings.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettings struct { + v *types.MappingLimitSettings +} + +func NewMappingLimitSettings() *_mappingLimitSettings { + + return &_mappingLimitSettings{v: types.NewMappingLimitSettings()} + +} + +func (s *_mappingLimitSettings) Coerce(coerce bool) *_mappingLimitSettings { + + s.v.Coerce = &coerce + + return s +} + +func (s *_mappingLimitSettings) Depth(depth types.MappingLimitSettingsDepthVariant) *_mappingLimitSettings { + + s.v.Depth = depth.MappingLimitSettingsDepthCaster() + + return s +} + +func (s *_mappingLimitSettings) DimensionFields(dimensionfields types.MappingLimitSettingsDimensionFieldsVariant) *_mappingLimitSettings { + + s.v.DimensionFields = dimensionfields.MappingLimitSettingsDimensionFieldsCaster() + + return s +} + +func (s *_mappingLimitSettings) FieldNameLength(fieldnamelength types.MappingLimitSettingsFieldNameLengthVariant) *_mappingLimitSettings { + + s.v.FieldNameLength = fieldnamelength.MappingLimitSettingsFieldNameLengthCaster() + + return s +} + +func (s *_mappingLimitSettings) IgnoreMalformed(ignoremalformed string) *_mappingLimitSettings { + + s.v.IgnoreMalformed = ignoremalformed + + return s 
+} + +func (s *_mappingLimitSettings) NestedFields(nestedfields types.MappingLimitSettingsNestedFieldsVariant) *_mappingLimitSettings { + + s.v.NestedFields = nestedfields.MappingLimitSettingsNestedFieldsCaster() + + return s +} + +func (s *_mappingLimitSettings) NestedObjects(nestedobjects types.MappingLimitSettingsNestedObjectsVariant) *_mappingLimitSettings { + + s.v.NestedObjects = nestedobjects.MappingLimitSettingsNestedObjectsCaster() + + return s +} + +func (s *_mappingLimitSettings) Source(source types.MappingLimitSettingsSourceFieldsVariant) *_mappingLimitSettings { + + s.v.Source = source.MappingLimitSettingsSourceFieldsCaster() + + return s +} + +func (s *_mappingLimitSettings) TotalFields(totalfields types.MappingLimitSettingsTotalFieldsVariant) *_mappingLimitSettings { + + s.v.TotalFields = totalfields.MappingLimitSettingsTotalFieldsCaster() + + return s +} + +func (s *_mappingLimitSettings) MappingLimitSettingsCaster() *types.MappingLimitSettings { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingsdepth.go b/typedapi/esdsl/mappinglimitsettingsdepth.go new file mode 100644 index 0000000000..77bc5d907e --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingsdepth.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsDepth struct { + v *types.MappingLimitSettingsDepth +} + +func NewMappingLimitSettingsDepth() *_mappingLimitSettingsDepth { + + return &_mappingLimitSettingsDepth{v: types.NewMappingLimitSettingsDepth()} + +} + +// The maximum depth for a field, which is measured as the number of inner +// objects. For instance, if all fields are defined +// at the root object level, then the depth is 1. If there is one object +// mapping, then the depth is 2, etc. +func (s *_mappingLimitSettingsDepth) Limit(limit int64) *_mappingLimitSettingsDepth { + + s.v.Limit = &limit + + return s +} + +func (s *_mappingLimitSettingsDepth) MappingLimitSettingsDepthCaster() *types.MappingLimitSettingsDepth { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingsdimensionfields.go b/typedapi/esdsl/mappinglimitsettingsdimensionfields.go new file mode 100644 index 0000000000..b72a2f7959 --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingsdimensionfields.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsDimensionFields struct { + v *types.MappingLimitSettingsDimensionFields +} + +func NewMappingLimitSettingsDimensionFields() *_mappingLimitSettingsDimensionFields { + + return &_mappingLimitSettingsDimensionFields{v: types.NewMappingLimitSettingsDimensionFields()} + +} + +// [preview] This functionality is in technical preview and may be changed or +// removed in a future release. +// Elastic will work to fix any issues, but features in technical preview are +// not subject to the support SLA of official GA features. +func (s *_mappingLimitSettingsDimensionFields) Limit(limit int64) *_mappingLimitSettingsDimensionFields { + + s.v.Limit = &limit + + return s +} + +func (s *_mappingLimitSettingsDimensionFields) MappingLimitSettingsDimensionFieldsCaster() *types.MappingLimitSettingsDimensionFields { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingsfieldnamelength.go b/typedapi/esdsl/mappinglimitsettingsfieldnamelength.go new file mode 100644 index 0000000000..6135d21524 --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingsfieldnamelength.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsFieldNameLength struct { + v *types.MappingLimitSettingsFieldNameLength +} + +func NewMappingLimitSettingsFieldNameLength() *_mappingLimitSettingsFieldNameLength { + + return &_mappingLimitSettingsFieldNameLength{v: types.NewMappingLimitSettingsFieldNameLength()} + +} + +// Setting for the maximum length of a field name. This setting isn’t really +// something that addresses mappings explosion but +// might still be useful if you want to limit the field length. It usually +// shouldn’t be necessary to set this setting. The +// default is okay unless a user starts to add a huge number of fields with +// really long names. Default is `Long.MAX_VALUE` (no limit). 
+func (s *_mappingLimitSettingsFieldNameLength) Limit(limit int64) *_mappingLimitSettingsFieldNameLength { + + s.v.Limit = &limit + + return s +} + +func (s *_mappingLimitSettingsFieldNameLength) MappingLimitSettingsFieldNameLengthCaster() *types.MappingLimitSettingsFieldNameLength { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingsnestedfields.go b/typedapi/esdsl/mappinglimitsettingsnestedfields.go new file mode 100644 index 0000000000..7f61f794b4 --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingsnestedfields.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsNestedFields struct { + v *types.MappingLimitSettingsNestedFields +} + +func NewMappingLimitSettingsNestedFields() *_mappingLimitSettingsNestedFields { + + return &_mappingLimitSettingsNestedFields{v: types.NewMappingLimitSettingsNestedFields()} + +} + +// The maximum number of distinct nested mappings in an index. 
The nested type +// should only be used in special cases, when +// arrays of objects need to be queried independently of each other. To +// safeguard against poorly designed mappings, this +// setting limits the number of unique nested types per index. +func (s *_mappingLimitSettingsNestedFields) Limit(limit int64) *_mappingLimitSettingsNestedFields { + + s.v.Limit = &limit + + return s +} + +func (s *_mappingLimitSettingsNestedFields) MappingLimitSettingsNestedFieldsCaster() *types.MappingLimitSettingsNestedFields { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingsnestedobjects.go b/typedapi/esdsl/mappinglimitsettingsnestedobjects.go new file mode 100644 index 0000000000..0efea97ce9 --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingsnestedobjects.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsNestedObjects struct { + v *types.MappingLimitSettingsNestedObjects +} + +func NewMappingLimitSettingsNestedObjects() *_mappingLimitSettingsNestedObjects { + + return &_mappingLimitSettingsNestedObjects{v: types.NewMappingLimitSettingsNestedObjects()} + +} + +// The maximum number of nested JSON objects that a single document can contain +// across all nested types. This limit helps +// to prevent out of memory errors when a document contains too many nested +// objects. +func (s *_mappingLimitSettingsNestedObjects) Limit(limit int64) *_mappingLimitSettingsNestedObjects { + + s.v.Limit = &limit + + return s +} + +func (s *_mappingLimitSettingsNestedObjects) MappingLimitSettingsNestedObjectsCaster() *types.MappingLimitSettingsNestedObjects { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingssourcefields.go b/typedapi/esdsl/mappinglimitsettingssourcefields.go new file mode 100644 index 0000000000..1e454ddecd --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingssourcefields.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcemode" +) + +type _mappingLimitSettingsSourceFields struct { + v *types.MappingLimitSettingsSourceFields +} + +func NewMappingLimitSettingsSourceFields(mode sourcemode.SourceMode) *_mappingLimitSettingsSourceFields { + + tmp := &_mappingLimitSettingsSourceFields{v: types.NewMappingLimitSettingsSourceFields()} + + tmp.Mode(mode) + + return tmp + +} + +func (s *_mappingLimitSettingsSourceFields) Mode(mode sourcemode.SourceMode) *_mappingLimitSettingsSourceFields { + + s.v.Mode = mode + return s +} + +func (s *_mappingLimitSettingsSourceFields) MappingLimitSettingsSourceFieldsCaster() *types.MappingLimitSettingsSourceFields { + return s.v +} diff --git a/typedapi/esdsl/mappinglimitsettingstotalfields.go b/typedapi/esdsl/mappinglimitsettingstotalfields.go new file mode 100644 index 0000000000..09c3b76f0d --- /dev/null +++ b/typedapi/esdsl/mappinglimitsettingstotalfields.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mappingLimitSettingsTotalFields struct { + v *types.MappingLimitSettingsTotalFields +} + +func NewMappingLimitSettingsTotalFields() *_mappingLimitSettingsTotalFields { + + return &_mappingLimitSettingsTotalFields{v: types.NewMappingLimitSettingsTotalFields()} + +} + +// This setting determines what happens when a dynamically mapped field would +// exceed the total fields limit. When set +// to false (the default), the index request of the document that tries to add a +// dynamic field to the mapping will fail +// with the message Limit of total fields [X] has been exceeded. When set to +// true, the index request will not fail. +// Instead, fields that would exceed the limit are not added to the mapping, +// similar to dynamic: false. +// The fields that were not added to the mapping will be added to the _ignored +// field. +func (s *_mappingLimitSettingsTotalFields) IgnoreDynamicBeyondLimit(ignoredynamicbeyondlimit string) *_mappingLimitSettingsTotalFields { + + s.v.IgnoreDynamicBeyondLimit = ignoredynamicbeyondlimit + + return s +} + +// The maximum number of fields in an index. Field and object mappings, as well +// as field aliases count towards this limit. +// The limit is in place to prevent mappings and searches from becoming too +// large. 
Higher values can lead to performance +// degradations and memory issues, especially in clusters with a high load or +// few resources. +func (s *_mappingLimitSettingsTotalFields) Limit(limit string) *_mappingLimitSettingsTotalFields { + + s.v.Limit = limit + + return s +} + +func (s *_mappingLimitSettingsTotalFields) MappingLimitSettingsTotalFieldsCaster() *types.MappingLimitSettingsTotalFields { + return s.v +} diff --git a/typedapi/esdsl/matchallquery.go b/typedapi/esdsl/matchallquery.go new file mode 100644 index 0000000000..0d2ec18da8 --- /dev/null +++ b/typedapi/esdsl/matchallquery.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _matchAllQuery struct { + v *types.MatchAllQuery +} + +// Matches all users, giving them all a `_score` of 1.0. +func NewMatchAllQuery() *_matchAllQuery { + + return &_matchAllQuery{v: types.NewMatchAllQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. 
+// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_matchAllQuery) Boost(boost float32) *_matchAllQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_matchAllQuery) QueryName_(queryname_ string) *_matchAllQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_matchAllQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.MatchAll = s.v + + return container +} + +func (s *_matchAllQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.MatchAll = s.v + + return container +} + +func (s *_matchAllQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.MatchAll = s.v + + return container +} + +func (s *_matchAllQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.MatchAll = s.v + + return container +} + +func (s *_matchAllQuery) MatchAllQueryCaster() *types.MatchAllQuery { + return s.v +} diff --git a/typedapi/esdsl/matchboolprefixquery.go b/typedapi/esdsl/matchboolprefixquery.go new file mode 100644 index 0000000000..bf27d3eef9 --- /dev/null +++ b/typedapi/esdsl/matchboolprefixquery.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" +) + +type _matchBoolPrefixQuery struct { + k string + v *types.MatchBoolPrefixQuery +} + +// Analyzes its input and constructs a `bool` query from the terms. +// Each term except the last is used in a `term` query. +// The last term is used in a prefix query. +func NewMatchBoolPrefixQuery(field string, query string) *_matchBoolPrefixQuery { + tmp := &_matchBoolPrefixQuery{ + k: field, + v: types.NewMatchBoolPrefixQuery(), + } + + tmp.Query(query) + return tmp +} + +// Analyzer used to convert the text in the query value into tokens. +func (s *_matchBoolPrefixQuery) Analyzer(analyzer string) *_matchBoolPrefixQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_matchBoolPrefixQuery) Boost(boost float32) *_matchBoolPrefixQuery { + + s.v.Boost = &boost + + return s +} + +// Maximum edit distance allowed for matching. +// Can be applied to the term subqueries constructed for all terms but the final +// term. 
+func (s *_matchBoolPrefixQuery) Fuzziness(fuzziness types.FuzzinessVariant) *_matchBoolPrefixQuery { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Method used to rewrite the query. +// Can be applied to the term subqueries constructed for all terms but the final +// term. +func (s *_matchBoolPrefixQuery) FuzzyRewrite(multitermqueryrewrite string) *_matchBoolPrefixQuery { + + s.v.FuzzyRewrite = &multitermqueryrewrite + + return s +} + +// If `true`, edits for fuzzy matching include transpositions of two adjacent +// characters (for example, `ab` to `ba`). +// Can be applied to the term subqueries constructed for all terms but the final +// term. +func (s *_matchBoolPrefixQuery) FuzzyTranspositions(fuzzytranspositions bool) *_matchBoolPrefixQuery { + + s.v.FuzzyTranspositions = &fuzzytranspositions + + return s +} + +// Maximum number of terms to which the query will expand. +// Can be applied to the term subqueries constructed for all terms but the final +// term. +func (s *_matchBoolPrefixQuery) MaxExpansions(maxexpansions int) *_matchBoolPrefixQuery { + + s.v.MaxExpansions = &maxexpansions + + return s +} + +// Minimum number of clauses that must match for a document to be returned. +// Applied to the constructed bool query. +func (s *_matchBoolPrefixQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_matchBoolPrefixQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Boolean logic used to interpret text in the query value. +// Applied to the constructed bool query. +func (s *_matchBoolPrefixQuery) Operator(operator operator.Operator) *_matchBoolPrefixQuery { + + s.v.Operator = &operator + return s +} + +// Number of beginning characters left unchanged for fuzzy matching. +// Can be applied to the term subqueries constructed for all terms but the final +// term. 
+func (s *_matchBoolPrefixQuery) PrefixLength(prefixlength int) *_matchBoolPrefixQuery { + + s.v.PrefixLength = &prefixlength + + return s +} + +// Terms you wish to find in the provided field. +// The last term is used in a prefix query. +func (s *_matchBoolPrefixQuery) Query(query string) *_matchBoolPrefixQuery { + + s.v.Query = query + + return s +} + +func (s *_matchBoolPrefixQuery) QueryName_(queryname_ string) *_matchBoolPrefixQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_matchBoolPrefixQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.MatchBoolPrefix = map[string]types.MatchBoolPrefixQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleMatchBoolPrefixQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleMatchBoolPrefixQuery() *_matchBoolPrefixQuery { + return &_matchBoolPrefixQuery{ + k: "", + v: types.NewMatchBoolPrefixQuery(), + } +} + +func (s *_matchBoolPrefixQuery) MatchBoolPrefixQueryCaster() *types.MatchBoolPrefixQuery { + return s.v.MatchBoolPrefixQueryCaster() +} diff --git a/typedapi/esdsl/matchnonequery.go b/typedapi/esdsl/matchnonequery.go new file mode 100644 index 0000000000..2d1b0fb8f9 --- /dev/null +++ b/typedapi/esdsl/matchnonequery.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _matchNoneQuery struct { + v *types.MatchNoneQuery +} + +// Matches no documents. +func NewMatchNoneQuery() *_matchNoneQuery { + + return &_matchNoneQuery{v: types.NewMatchNoneQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_matchNoneQuery) Boost(boost float32) *_matchNoneQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_matchNoneQuery) QueryName_(queryname_ string) *_matchNoneQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_matchNoneQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.MatchNone = s.v + + return container +} + +func (s *_matchNoneQuery) MatchNoneQueryCaster() *types.MatchNoneQuery { + return s.v +} diff --git a/typedapi/esdsl/matchonlytextproperty.go b/typedapi/esdsl/matchonlytextproperty.go new file mode 100644 index 0000000000..4d74973b57 --- /dev/null +++ b/typedapi/esdsl/matchonlytextproperty.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _matchOnlyTextProperty struct { + v *types.MatchOnlyTextProperty +} + +func NewMatchOnlyTextProperty() *_matchOnlyTextProperty { + + return &_matchOnlyTextProperty{v: types.NewMatchOnlyTextProperty()} + +} + +// Allows you to copy the values of multiple fields into a group +// field, which can then be queried as a single field. +func (s *_matchOnlyTextProperty) CopyTo(fields ...string) *_matchOnlyTextProperty { + + s.v.CopyTo = fields + + return s +} + +// Multi-fields allow the same string value to be indexed in multiple ways for +// different purposes, such as one +// field for search and a multi-field for sorting and aggregations, or the same +// string value analyzed by different analyzers. 
+func (s *_matchOnlyTextProperty) Fields(fields map[string]types.Property) *_matchOnlyTextProperty { + + s.v.Fields = fields + return s +} + +func (s *_matchOnlyTextProperty) AddField(key string, value types.PropertyVariant) *_matchOnlyTextProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +// Metadata about the field. +func (s *_matchOnlyTextProperty) Meta(meta map[string]string) *_matchOnlyTextProperty { + + s.v.Meta = meta + return s +} + +func (s *_matchOnlyTextProperty) AddMeta(key string, value string) *_matchOnlyTextProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_matchOnlyTextProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_matchOnlyTextProperty) MatchOnlyTextPropertyCaster() *types.MatchOnlyTextProperty { + return s.v +} diff --git a/typedapi/esdsl/matchphraseprefixquery.go b/typedapi/esdsl/matchphraseprefixquery.go new file mode 100644 index 0000000000..939ef541b7 --- /dev/null +++ b/typedapi/esdsl/matchphraseprefixquery.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" +) + +type _matchPhrasePrefixQuery struct { + k string + v *types.MatchPhrasePrefixQuery +} + +// Returns documents that contain the words of a provided text, in the same +// order as provided. +// The last term of the provided text is treated as a prefix, matching any words +// that begin with that term. +func NewMatchPhrasePrefixQuery(field string, query string) *_matchPhrasePrefixQuery { + tmp := &_matchPhrasePrefixQuery{ + k: field, + v: types.NewMatchPhrasePrefixQuery(), + } + + tmp.Query(query) + return tmp +} + +// Analyzer used to convert text in the query value into tokens. +func (s *_matchPhrasePrefixQuery) Analyzer(analyzer string) *_matchPhrasePrefixQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_matchPhrasePrefixQuery) Boost(boost float32) *_matchPhrasePrefixQuery { + + s.v.Boost = &boost + + return s +} + +// Maximum number of terms to which the last provided term of the query value +// will expand. 
+func (s *_matchPhrasePrefixQuery) MaxExpansions(maxexpansions int) *_matchPhrasePrefixQuery { + + s.v.MaxExpansions = &maxexpansions + + return s +} + +// Text you wish to find in the provided field. +func (s *_matchPhrasePrefixQuery) Query(query string) *_matchPhrasePrefixQuery { + + s.v.Query = query + + return s +} + +func (s *_matchPhrasePrefixQuery) QueryName_(queryname_ string) *_matchPhrasePrefixQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Maximum number of positions allowed between matching tokens. +func (s *_matchPhrasePrefixQuery) Slop(slop int) *_matchPhrasePrefixQuery { + + s.v.Slop = &slop + + return s +} + +// Indicates whether no documents are returned if the analyzer removes all +// tokens, such as when using a `stop` filter. +func (s *_matchPhrasePrefixQuery) ZeroTermsQuery(zerotermsquery zerotermsquery.ZeroTermsQuery) *_matchPhrasePrefixQuery { + + s.v.ZeroTermsQuery = &zerotermsquery + return s +} + +func (s *_matchPhrasePrefixQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.MatchPhrasePrefix = map[string]types.MatchPhrasePrefixQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleMatchPhrasePrefixQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleMatchPhrasePrefixQuery() *_matchPhrasePrefixQuery { + return &_matchPhrasePrefixQuery{ + k: "", + v: types.NewMatchPhrasePrefixQuery(), + } +} + +func (s *_matchPhrasePrefixQuery) MatchPhrasePrefixQueryCaster() *types.MatchPhrasePrefixQuery { + return s.v.MatchPhrasePrefixQueryCaster() +} diff --git a/typedapi/esdsl/matchphrasequery.go b/typedapi/esdsl/matchphrasequery.go new file mode 100644 index 0000000000..71bd423a69 --- /dev/null +++ b/typedapi/esdsl/matchphrasequery.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" +) + +type _matchPhraseQuery struct { + k string + v *types.MatchPhraseQuery +} + +// Analyzes the text and creates a phrase query out of the analyzed text. +func NewMatchPhraseQuery(field string, query string) *_matchPhraseQuery { + tmp := &_matchPhraseQuery{ + k: field, + v: types.NewMatchPhraseQuery(), + } + + tmp.Query(query) + return tmp +} + +// Analyzer used to convert the text in the query value into tokens. +func (s *_matchPhraseQuery) Analyzer(analyzer string) *_matchPhraseQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_matchPhraseQuery) Boost(boost float32) *_matchPhraseQuery { + + s.v.Boost = &boost + + return s +} + +// Query terms that are analyzed and turned into a phrase query. +func (s *_matchPhraseQuery) Query(query string) *_matchPhraseQuery { + + s.v.Query = query + + return s +} + +func (s *_matchPhraseQuery) QueryName_(queryname_ string) *_matchPhraseQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Maximum number of positions allowed between matching tokens. +func (s *_matchPhraseQuery) Slop(slop int) *_matchPhraseQuery { + + s.v.Slop = &slop + + return s +} + +// Indicates whether no documents are returned if the `analyzer` removes all +// tokens, such as when using a `stop` filter. +func (s *_matchPhraseQuery) ZeroTermsQuery(zerotermsquery zerotermsquery.ZeroTermsQuery) *_matchPhraseQuery { + + s.v.ZeroTermsQuery = &zerotermsquery + return s +} + +func (s *_matchPhraseQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.MatchPhrase = map[string]types.MatchPhraseQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleMatchPhraseQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleMatchPhraseQuery() *_matchPhraseQuery { + return &_matchPhraseQuery{ + k: "", + v: types.NewMatchPhraseQuery(), + } +} + +func (s *_matchPhraseQuery) MatchPhraseQueryCaster() *types.MatchPhraseQuery { + return s.v.MatchPhraseQueryCaster() +} diff --git a/typedapi/esdsl/matchquery.go b/typedapi/esdsl/matchquery.go new file mode 100644 index 0000000000..b37ef8038a --- /dev/null +++ b/typedapi/esdsl/matchquery.go @@ -0,0 +1,214 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" +) + +type _matchQuery struct { + k string + v *types.MatchQuery +} + +// Returns roles that match a provided text, number, date or boolean value. +// The provided text is analyzed before matching. +func NewMatchQuery(field string, query string) *_matchQuery { + tmp := &_matchQuery{ + k: field, + v: types.NewMatchQuery(), + } + + tmp.Query(query) + return tmp +} + +// Analyzer used to convert the text in the query value into tokens. +func (s *_matchQuery) Analyzer(analyzer string) *_matchQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// If `true`, match phrase queries are automatically created for multi-term +// synonyms. +func (s *_matchQuery) AutoGenerateSynonymsPhraseQuery(autogeneratesynonymsphrasequery bool) *_matchQuery { + + s.v.AutoGenerateSynonymsPhraseQuery = &autogeneratesynonymsphrasequery + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. 
+// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_matchQuery) Boost(boost float32) *_matchQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_matchQuery) CutoffFrequency(cutofffrequency types.Float64) *_matchQuery { + + s.v.CutoffFrequency = &cutofffrequency + + return s +} + +// Maximum edit distance allowed for matching. +func (s *_matchQuery) Fuzziness(fuzziness types.FuzzinessVariant) *_matchQuery { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Method used to rewrite the query. +func (s *_matchQuery) FuzzyRewrite(multitermqueryrewrite string) *_matchQuery { + + s.v.FuzzyRewrite = &multitermqueryrewrite + + return s +} + +// If `true`, edits for fuzzy matching include transpositions of two adjacent +// characters (for example, `ab` to `ba`). +func (s *_matchQuery) FuzzyTranspositions(fuzzytranspositions bool) *_matchQuery { + + s.v.FuzzyTranspositions = &fuzzytranspositions + + return s +} + +// If `true`, format-based errors, such as providing a text query value for a +// numeric field, are ignored. +func (s *_matchQuery) Lenient(lenient bool) *_matchQuery { + + s.v.Lenient = &lenient + + return s +} + +// Maximum number of terms to which the query will expand. +func (s *_matchQuery) MaxExpansions(maxexpansions int) *_matchQuery { + + s.v.MaxExpansions = &maxexpansions + + return s +} + +// Minimum number of clauses that must match for a document to be returned. +func (s *_matchQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_matchQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Boolean logic used to interpret text in the query value. +func (s *_matchQuery) Operator(operator operator.Operator) *_matchQuery { + + s.v.Operator = &operator + return s +} + +// Number of beginning characters left unchanged for fuzzy matching. 
+func (s *_matchQuery) PrefixLength(prefixlength int) *_matchQuery { + + s.v.PrefixLength = &prefixlength + + return s +} + +// Text, number, boolean value or date you wish to find in the provided field. +func (s *_matchQuery) Query(query string) *_matchQuery { + + s.v.Query = query + + return s +} + +func (s *_matchQuery) QueryName_(queryname_ string) *_matchQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates whether no documents are returned if the `analyzer` removes all +// tokens, such as when using a `stop` filter. +func (s *_matchQuery) ZeroTermsQuery(zerotermsquery zerotermsquery.ZeroTermsQuery) *_matchQuery { + + s.v.ZeroTermsQuery = &zerotermsquery + return s +} + +func (s *_matchQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Match = map[string]types.MatchQuery{ + s.k: *s.v, + } + return container +} + +func (s *_matchQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Match = map[string]types.MatchQuery{ + s.k: *s.v, + } + return container +} + +func (s *_matchQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Match = map[string]types.MatchQuery{ + s.k: *s.v, + } + return container +} + +func (s *_matchQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Match = map[string]types.MatchQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleMatchQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleMatchQuery() *_matchQuery { + return &_matchQuery{ + k: "", + v: types.NewMatchQuery(), + } +} + +func (s *_matchQuery) MatchQueryCaster() *types.MatchQuery { + return s.v.MatchQueryCaster() +} diff --git a/typedapi/esdsl/matrixstatsaggregation.go b/typedapi/esdsl/matrixstatsaggregation.go new file mode 100644 index 0000000000..899a7a2b54 --- /dev/null +++ b/typedapi/esdsl/matrixstatsaggregation.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" +) + +type _matrixStatsAggregation struct { + v *types.MatrixStatsAggregation +} + +// A numeric aggregation that computes the following statistics over a set of +// document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, +// `covariance`, and `correlation`.
+func NewMatrixStatsAggregation() *_matrixStatsAggregation { + + return &_matrixStatsAggregation{v: types.NewMatrixStatsAggregation()} + +} + +// An array of fields for computing the statistics. +func (s *_matrixStatsAggregation) Fields(fields ...string) *_matrixStatsAggregation { + + s.v.Fields = fields + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_matrixStatsAggregation) Missing(missing map[string]types.Float64) *_matrixStatsAggregation { + + s.v.Missing = missing + return s +} + +func (s *_matrixStatsAggregation) AddMissing(key string, value types.Float64) *_matrixStatsAggregation { + + var tmp map[string]types.Float64 + if s.v.Missing == nil { + s.v.Missing = make(map[string]types.Float64) + } else { + tmp = s.v.Missing + } + + tmp[key] = value + + s.v.Missing = tmp + return s +} + +// Array value the aggregation will use for array or multi-valued fields. +func (s *_matrixStatsAggregation) Mode(mode sortmode.SortMode) *_matrixStatsAggregation { + + s.v.Mode = &mode + return s +} + +func (s *_matrixStatsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MatrixStats = s.v + + return container +} + +func (s *_matrixStatsAggregation) MatrixStatsAggregationCaster() *types.MatrixStatsAggregation { + return s.v +} diff --git a/typedapi/esdsl/maxaggregation.go b/typedapi/esdsl/maxaggregation.go new file mode 100644 index 0000000000..8b7602196b --- /dev/null +++ b/typedapi/esdsl/maxaggregation.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _maxAggregation struct { + v *types.MaxAggregation +} + +// A single-value metrics aggregation that returns the maximum value among the +// numeric values extracted from the aggregated documents. +func NewMaxAggregation() *_maxAggregation { + + return &_maxAggregation{v: types.NewMaxAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_maxAggregation) Field(field string) *_maxAggregation { + + s.v.Field = &field + + return s +} + +func (s *_maxAggregation) Format(format string) *_maxAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_maxAggregation) Missing(missing types.MissingVariant) *_maxAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_maxAggregation) Script(script types.ScriptVariant) *_maxAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_maxAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Max = s.v + + return container +} + +func (s *_maxAggregation) MaxAggregationCaster() *types.MaxAggregation { + return s.v +} diff --git a/typedapi/esdsl/maxbucketaggregation.go b/typedapi/esdsl/maxbucketaggregation.go new file mode 100644 index 0000000000..927d94781b --- /dev/null +++ b/typedapi/esdsl/maxbucketaggregation.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _maxBucketAggregation struct { + v *types.MaxBucketAggregation +} + +// A sibling pipeline aggregation which identifies the bucket(s) with the +// maximum value of a specified metric in a sibling aggregation and outputs both +// the value and the key(s) of the bucket(s). +func NewMaxBucketAggregation() *_maxBucketAggregation { + + return &_maxBucketAggregation{v: types.NewMaxBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_maxBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_maxBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_maxBucketAggregation) Format(format string) *_maxBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_maxBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_maxBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_maxBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MaxBucket = s.v + + return container +} + +func (s *_maxBucketAggregation) MaxBucketAggregationCaster() *types.MaxBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/maxmind.go b/typedapi/esdsl/maxmind.go new file mode 100644 index 0000000000..fa21365866 --- /dev/null +++ b/typedapi/esdsl/maxmind.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _maxmind struct { + v *types.Maxmind +} + +func NewMaxmind() *_maxmind { + + return &_maxmind{v: types.NewMaxmind()} + +} + +func (s *_maxmind) AccountId(id string) *_maxmind { + + s.v.AccountId = id + + return s +} + +func (s *_maxmind) DatabaseConfigurationCaster() *types.DatabaseConfiguration { + container := types.NewDatabaseConfiguration() + + container.Maxmind = s.v + + return container +} + +func (s *_maxmind) DatabaseConfigurationFullCaster() *types.DatabaseConfigurationFull { + container := types.NewDatabaseConfigurationFull() + + container.Maxmind = s.v + + return container +} + +func (s *_maxmind) MaxmindCaster() *types.Maxmind { + return s.v +} diff --git a/typedapi/esdsl/medianabsolutedeviationaggregation.go b/typedapi/esdsl/medianabsolutedeviationaggregation.go new file mode 100644 index 0000000000..015fc63fef --- /dev/null +++ b/typedapi/esdsl/medianabsolutedeviationaggregation.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _medianAbsoluteDeviationAggregation struct { + v *types.MedianAbsoluteDeviationAggregation +} + +// A single-value aggregation that approximates the median absolute deviation of +// its search results. +func NewMedianAbsoluteDeviationAggregation() *_medianAbsoluteDeviationAggregation { + + return &_medianAbsoluteDeviationAggregation{v: types.NewMedianAbsoluteDeviationAggregation()} + +} + +// Limits the maximum number of nodes used by the underlying TDigest algorithm +// to `20 * compression`, enabling control of memory usage and approximation +// error. +func (s *_medianAbsoluteDeviationAggregation) Compression(compression types.Float64) *_medianAbsoluteDeviationAggregation { + + s.v.Compression = &compression + + return s +} + +// The field on which to run the aggregation. 
+func (s *_medianAbsoluteDeviationAggregation) Field(field string) *_medianAbsoluteDeviationAggregation { + + s.v.Field = &field + + return s +} + +func (s *_medianAbsoluteDeviationAggregation) Format(format string) *_medianAbsoluteDeviationAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_medianAbsoluteDeviationAggregation) Missing(missing types.MissingVariant) *_medianAbsoluteDeviationAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_medianAbsoluteDeviationAggregation) Script(script types.ScriptVariant) *_medianAbsoluteDeviationAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_medianAbsoluteDeviationAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MedianAbsoluteDeviation = s.v + + return container +} + +func (s *_medianAbsoluteDeviationAggregation) MedianAbsoluteDeviationAggregationCaster() *types.MedianAbsoluteDeviationAggregation { + return s.v +} diff --git a/typedapi/esdsl/merge.go b/typedapi/esdsl/merge.go new file mode 100644 index 0000000000..a2738d81ea --- /dev/null +++ b/typedapi/esdsl/merge.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _merge struct { + v *types.Merge +} + +func NewMerge() *_merge { + + return &_merge{v: types.NewMerge()} + +} + +func (s *_merge) Scheduler(scheduler types.MergeSchedulerVariant) *_merge { + + s.v.Scheduler = scheduler.MergeSchedulerCaster() + + return s +} + +func (s *_merge) MergeCaster() *types.Merge { + return s.v +} diff --git a/typedapi/esdsl/mergescheduler.go b/typedapi/esdsl/mergescheduler.go new file mode 100644 index 0000000000..8323f18d4e --- /dev/null +++ b/typedapi/esdsl/mergescheduler.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mergeScheduler struct { + v *types.MergeScheduler +} + +func NewMergeScheduler() *_mergeScheduler { + + return &_mergeScheduler{v: types.NewMergeScheduler()} + +} + +func (s *_mergeScheduler) MaxMergeCount(stringifiedinteger types.StringifiedintegerVariant) *_mergeScheduler { + + s.v.MaxMergeCount = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_mergeScheduler) MaxThreadCount(stringifiedinteger types.StringifiedintegerVariant) *_mergeScheduler { + + s.v.MaxThreadCount = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_mergeScheduler) MergeSchedulerCaster() *types.MergeScheduler { + return s.v +} diff --git a/typedapi/esdsl/message.go b/typedapi/esdsl/message.go new file mode 100644 index 0000000000..668a8c6066 --- /dev/null +++ b/typedapi/esdsl/message.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _message struct { + v *types.Message +} + +func NewMessage(role string) *_message { + + tmp := &_message{v: types.NewMessage()} + + tmp.Role(role) + + return tmp + +} + +// The content of the message. +func (s *_message) Content(messagecontent types.MessageContentVariant) *_message { + + s.v.Content = *messagecontent.MessageContentCaster() + + return s +} + +// The role of the message author. +func (s *_message) Role(role string) *_message { + + s.v.Role = role + + return s +} + +// The tool call that this message is responding to. +func (s *_message) ToolCallId(id string) *_message { + + s.v.ToolCallId = &id + + return s +} + +// The tool calls generated by the model. +func (s *_message) ToolCalls(toolcalls ...types.ToolCallVariant) *_message { + + for _, v := range toolcalls { + + s.v.ToolCalls = append(s.v.ToolCalls, *v.ToolCallCaster()) + + } + return s +} + +func (s *_message) MessageCaster() *types.Message { + return s.v +} diff --git a/typedapi/esdsl/messagecontent.go b/typedapi/esdsl/messagecontent.go new file mode 100644 index 0000000000..447ad3147c --- /dev/null +++ b/typedapi/esdsl/messagecontent.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _messageContent struct { + v types.MessageContent +} + +func NewMessageContent() *_messageContent { + return &_messageContent{v: nil} +} + +func (u *_messageContent) String(string string) *_messageContent { + + u.v = &string + + return u +} + +func (u *_messageContent) ContentObjects(contentobjects ...types.ContentObjectVariant) *_messageContent { + + u.v = make([]types.ContentObject, len(contentobjects)) + for i, v := range contentobjects { + u.v.([]types.ContentObject)[i] = *v.ContentObjectCaster() + } + + return u +} + +func (u *_messageContent) MessageContentCaster() *types.MessageContent { + return &u.v +} diff --git a/typedapi/esdsl/metadata.go b/typedapi/esdsl/metadata.go new file mode 100644 index 0000000000..bba97cbc9c --- /dev/null +++ b/typedapi/esdsl/metadata.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// This is provide an API for type alias. +type _metadata struct { + v types.Metadata +} + +func NewMetadata(metadata map[string]json.RawMessage) *_metadata { + return &_metadata{v: make(map[string]json.RawMessage, 0)} +} + +func (u *_metadata) MetadataCaster() *types.Metadata { + return &u.v +} diff --git a/typedapi/esdsl/mgetoperation.go b/typedapi/esdsl/mgetoperation.go new file mode 100644 index 0000000000..dac52888a4 --- /dev/null +++ b/typedapi/esdsl/mgetoperation.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _mgetOperation struct { + v *types.MgetOperation +} + +func NewMgetOperation() *_mgetOperation { + + return &_mgetOperation{v: types.NewMgetOperation()} + +} + +// The unique document ID. +func (s *_mgetOperation) Id_(id string) *_mgetOperation { + + s.v.Id_ = id + + return s +} + +// The index that contains the document. +func (s *_mgetOperation) Index_(indexname string) *_mgetOperation { + + s.v.Index_ = &indexname + + return s +} + +// The key for the primary shard the document resides on. Required if routing is +// used during indexing. +func (s *_mgetOperation) Routing(routing string) *_mgetOperation { + + s.v.Routing = &routing + + return s +} + +// If `false`, excludes all _source fields. +func (s *_mgetOperation) Source_(sourceconfig types.SourceConfigVariant) *_mgetOperation { + + s.v.Source_ = *sourceconfig.SourceConfigCaster() + + return s +} + +// The stored fields you want to retrieve. 
+func (s *_mgetOperation) StoredFields(fields ...string) *_mgetOperation { + + s.v.StoredFields = fields + + return s +} + +func (s *_mgetOperation) Version(versionnumber int64) *_mgetOperation { + + s.v.Version = &versionnumber + + return s +} + +func (s *_mgetOperation) VersionType(versiontype versiontype.VersionType) *_mgetOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_mgetOperation) MgetOperationCaster() *types.MgetOperation { + return s.v +} diff --git a/typedapi/esdsl/migrateaction.go b/typedapi/esdsl/migrateaction.go new file mode 100644 index 0000000000..5573dd50dc --- /dev/null +++ b/typedapi/esdsl/migrateaction.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _migrateAction struct { + v *types.MigrateAction +} + +func NewMigrateAction() *_migrateAction { + + return &_migrateAction{v: types.NewMigrateAction()} + +} + +func (s *_migrateAction) Enabled(enabled bool) *_migrateAction { + + s.v.Enabled = &enabled + + return s +} + +func (s *_migrateAction) MigrateActionCaster() *types.MigrateAction { + return s.v +} diff --git a/typedapi/esdsl/migratereindex.go b/typedapi/esdsl/migratereindex.go new file mode 100644 index 0000000000..51c3e7f76b --- /dev/null +++ b/typedapi/esdsl/migratereindex.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/modeenum" +) + +type _migrateReindex struct { + v *types.MigrateReindex +} + +func NewMigrateReindex(mode modeenum.ModeEnum, source types.SourceIndexVariant) *_migrateReindex { + + tmp := &_migrateReindex{v: types.NewMigrateReindex()} + + tmp.Mode(mode) + + tmp.Source(source) + + return tmp + +} + +// Reindex mode. Currently only 'upgrade' is supported. +func (s *_migrateReindex) Mode(mode modeenum.ModeEnum) *_migrateReindex { + + s.v.Mode = mode + return s +} + +// The source index or data stream (only data streams are currently supported). +func (s *_migrateReindex) Source(source types.SourceIndexVariant) *_migrateReindex { + + s.v.Source = *source.SourceIndexCaster() + + return s +} + +func (s *_migrateReindex) MigrateReindexCaster() *types.MigrateReindex { + return s.v +} diff --git a/typedapi/esdsl/minaggregation.go b/typedapi/esdsl/minaggregation.go new file mode 100644 index 0000000000..59eec46650 --- /dev/null +++ b/typedapi/esdsl/minaggregation.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _minAggregation struct { + v *types.MinAggregation +} + +// A single-value metrics aggregation that returns the minimum value among +// numeric values extracted from the aggregated documents. +func NewMinAggregation() *_minAggregation { + + return &_minAggregation{v: types.NewMinAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_minAggregation) Field(field string) *_minAggregation { + + s.v.Field = &field + + return s +} + +func (s *_minAggregation) Format(format string) *_minAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_minAggregation) Missing(missing types.MissingVariant) *_minAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_minAggregation) Script(script types.ScriptVariant) *_minAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_minAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Min = s.v + + return container +} + +func (s *_minAggregation) MinAggregationCaster() *types.MinAggregation { + return s.v +} diff --git a/typedapi/esdsl/minbucketaggregation.go b/typedapi/esdsl/minbucketaggregation.go new file mode 100644 index 0000000000..67bbe50605 --- /dev/null +++ b/typedapi/esdsl/minbucketaggregation.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _minBucketAggregation struct { + v *types.MinBucketAggregation +} + +// A sibling pipeline aggregation which identifies the bucket(s) with the +// minimum value of a specified metric in a sibling aggregation and outputs both +// the value and the key(s) of the bucket(s). +func NewMinBucketAggregation() *_minBucketAggregation { + + return &_minBucketAggregation{v: types.NewMinBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_minBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_minBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. 
+func (s *_minBucketAggregation) Format(format string) *_minBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_minBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_minBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_minBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MinBucket = s.v + + return container +} + +func (s *_minBucketAggregation) MinBucketAggregationCaster() *types.MinBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/minimumshouldmatch.go b/typedapi/esdsl/minimumshouldmatch.go new file mode 100644 index 0000000000..f0ed3206dc --- /dev/null +++ b/typedapi/esdsl/minimumshouldmatch.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _minimumShouldMatch struct { + v types.MinimumShouldMatch +} + +func NewMinimumShouldMatch() *_minimumShouldMatch { + return &_minimumShouldMatch{v: nil} +} + +func (u *_minimumShouldMatch) Int(int int) *_minimumShouldMatch { + + u.v = &int + + return u +} + +func (u *_minimumShouldMatch) String(string string) *_minimumShouldMatch { + + u.v = &string + + return u +} + +func (u *_minimumShouldMatch) MinimumShouldMatchCaster() *types.MinimumShouldMatch { + return &u.v +} diff --git a/typedapi/esdsl/missing.go b/typedapi/esdsl/missing.go new file mode 100644 index 0000000000..65d3a50db8 --- /dev/null +++ b/typedapi/esdsl/missing.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _missing struct { + v types.Missing +} + +func NewMissing() *_missing { + return &_missing{v: nil} +} + +func (u *_missing) String(string string) *_missing { + + u.v = &string + + return u +} + +func (u *_missing) Int(int int) *_missing { + + u.v = &int + + return u +} + +func (u *_missing) Float64(float64 types.Float64) *_missing { + + u.v = &float64 + + return u +} + +func (u *_missing) Bool(bool bool) *_missing { + + u.v = &bool + + return u +} + +func (u *_missing) MissingCaster() *types.Missing { + return &u.v +} diff --git a/typedapi/esdsl/missingaggregation.go b/typedapi/esdsl/missingaggregation.go new file mode 100644 index 0000000000..5d63be70ef --- /dev/null +++ b/typedapi/esdsl/missingaggregation.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _missingAggregation struct { + v *types.MissingAggregation +} + +func NewMissingAggregation() *_missingAggregation { + + return &_missingAggregation{v: types.NewMissingAggregation()} + +} + +// The name of the field. +func (s *_missingAggregation) Field(field string) *_missingAggregation { + + s.v.Field = &field + + return s +} + +func (s *_missingAggregation) Missing(missing types.MissingVariant) *_missingAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_missingAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Missing = s.v + + return container +} + +func (s *_missingAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Missing = s.v + + return container +} + +func (s *_missingAggregation) MissingAggregationCaster() *types.MissingAggregation { + return s.v +} diff --git a/typedapi/esdsl/modelplotconfig.go b/typedapi/esdsl/modelplotconfig.go new file mode 100644 index 0000000000..5a84c4d48d --- /dev/null +++ b/typedapi/esdsl/modelplotconfig.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _modelPlotConfig struct { + v *types.ModelPlotConfig +} + +func NewModelPlotConfig() *_modelPlotConfig { + + return &_modelPlotConfig{v: types.NewModelPlotConfig()} + +} + +// If true, enables calculation and storage of the model change annotations for +// each entity that is being analyzed. +func (s *_modelPlotConfig) AnnotationsEnabled(annotationsenabled bool) *_modelPlotConfig { + + s.v.AnnotationsEnabled = &annotationsenabled + + return s +} + +// If true, enables calculation and storage of the model bounds for each entity +// that is being analyzed. +func (s *_modelPlotConfig) Enabled(enabled bool) *_modelPlotConfig { + + s.v.Enabled = &enabled + + return s +} + +// Limits data collection to this comma separated list of partition or by field +// values. If terms are not specified or it is an empty string, no filtering is +// applied. Wildcards are not supported. Only the specified terms can be viewed +// when using the Single Metric Viewer. 
+func (s *_modelPlotConfig) Terms(field string) *_modelPlotConfig { + + s.v.Terms = &field + + return s +} + +func (s *_modelPlotConfig) ModelPlotConfigCaster() *types.ModelPlotConfig { + return s.v +} diff --git a/typedapi/esdsl/morelikethisquery.go b/typedapi/esdsl/morelikethisquery.go new file mode 100644 index 0000000000..cf56fdcce7 --- /dev/null +++ b/typedapi/esdsl/morelikethisquery.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _moreLikeThisQuery struct { + v *types.MoreLikeThisQuery +} + +// Returns documents that are "like" a given set of documents. +func NewMoreLikeThisQuery() *_moreLikeThisQuery { + + return &_moreLikeThisQuery{v: types.NewMoreLikeThisQuery()} + +} + +// The analyzer that is used to analyze the free form text. +// Defaults to the analyzer associated with the first field in fields. 
+func (s *_moreLikeThisQuery) Analyzer(analyzer string) *_moreLikeThisQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_moreLikeThisQuery) Boost(boost float32) *_moreLikeThisQuery { + + s.v.Boost = &boost + + return s +} + +// Each term in the formed query could be further boosted by their tf-idf score. +// This sets the boost factor to use when using this feature. +// Defaults to deactivated (0). +func (s *_moreLikeThisQuery) BoostTerms(boostterms types.Float64) *_moreLikeThisQuery { + + s.v.BoostTerms = &boostterms + + return s +} + +// Controls whether the query should fail (throw an exception) if any of the +// specified fields are not of the supported types (`text` or `keyword`). +func (s *_moreLikeThisQuery) FailOnUnsupportedField(failonunsupportedfield bool) *_moreLikeThisQuery { + + s.v.FailOnUnsupportedField = &failonunsupportedfield + + return s +} + +// A list of fields to fetch and analyze the text from. +// Defaults to the `index.query.default_field` index setting, which has a +// default value of `*`. +func (s *_moreLikeThisQuery) Fields(fields ...string) *_moreLikeThisQuery { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, v) + + } + return s +} + +// Specifies whether the input documents should also be included in the search +// results returned. +func (s *_moreLikeThisQuery) Include(include bool) *_moreLikeThisQuery { + + s.v.Include = &include + + return s +} + +// Specifies free form text and/or a single or multiple documents for which you +// want to find similar documents. 
+func (s *_moreLikeThisQuery) Like(likes ...types.LikeVariant) *_moreLikeThisQuery { + + s.v.Like = make([]types.Like, len(likes)) + for i, v := range likes { + s.v.Like[i] = *v.LikeCaster() + } + + return s +} + +// The maximum document frequency above which the terms are ignored from the +// input document. +func (s *_moreLikeThisQuery) MaxDocFreq(maxdocfreq int) *_moreLikeThisQuery { + + s.v.MaxDocFreq = &maxdocfreq + + return s +} + +// The maximum number of query terms that can be selected. +func (s *_moreLikeThisQuery) MaxQueryTerms(maxqueryterms int) *_moreLikeThisQuery { + + s.v.MaxQueryTerms = &maxqueryterms + + return s +} + +// The maximum word length above which the terms are ignored. +// Defaults to unbounded (`0`). +func (s *_moreLikeThisQuery) MaxWordLength(maxwordlength int) *_moreLikeThisQuery { + + s.v.MaxWordLength = &maxwordlength + + return s +} + +// The minimum document frequency below which the terms are ignored from the +// input document. +func (s *_moreLikeThisQuery) MinDocFreq(mindocfreq int) *_moreLikeThisQuery { + + s.v.MinDocFreq = &mindocfreq + + return s +} + +// The minimum term frequency below which the terms are ignored from the input +// document. +func (s *_moreLikeThisQuery) MinTermFreq(mintermfreq int) *_moreLikeThisQuery { + + s.v.MinTermFreq = &mintermfreq + + return s +} + +// The minimum word length below which the terms are ignored. +func (s *_moreLikeThisQuery) MinWordLength(minwordlength int) *_moreLikeThisQuery { + + s.v.MinWordLength = &minwordlength + + return s +} + +// After the disjunctive query has been formed, this parameter controls the +// number of terms that must match. 
+func (s *_moreLikeThisQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_moreLikeThisQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +func (s *_moreLikeThisQuery) QueryName_(queryname_ string) *_moreLikeThisQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_moreLikeThisQuery) Routing(routing string) *_moreLikeThisQuery { + + s.v.Routing = &routing + + return s +} + +// An array of stop words. +// Any word in this set is ignored. +func (s *_moreLikeThisQuery) StopWords(stopwords ...string) *_moreLikeThisQuery { + + s.v.StopWords = stopwords + + return s +} + +// Used in combination with `like` to exclude documents that match a set of +// terms. +func (s *_moreLikeThisQuery) Unlike(unlikes ...types.LikeVariant) *_moreLikeThisQuery { + + s.v.Unlike = make([]types.Like, len(unlikes)) + for i, v := range unlikes { + s.v.Unlike[i] = *v.LikeCaster() + } + + return s +} + +func (s *_moreLikeThisQuery) Version(versionnumber int64) *_moreLikeThisQuery { + + s.v.Version = &versionnumber + + return s +} + +func (s *_moreLikeThisQuery) VersionType(versiontype versiontype.VersionType) *_moreLikeThisQuery { + + s.v.VersionType = &versiontype + return s +} + +func (s *_moreLikeThisQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.MoreLikeThis = s.v + + return container +} + +func (s *_moreLikeThisQuery) MoreLikeThisQueryCaster() *types.MoreLikeThisQuery { + return s.v +} diff --git a/typedapi/esdsl/movingaverageaggregation.go b/typedapi/esdsl/movingaverageaggregation.go new file mode 100644 index 0000000000..933633c735 --- /dev/null +++ b/typedapi/esdsl/movingaverageaggregation.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _movingAverageAggregation struct { + v types.MovingAverageAggregation +} + +func NewMovingAverageAggregation() *_movingAverageAggregation { + return &_movingAverageAggregation{v: nil} +} + +func (u *_movingAverageAggregation) LinearMovingAverageAggregation(linearmovingaverageaggregation types.LinearMovingAverageAggregationVariant) *_movingAverageAggregation { + + u.v = &linearmovingaverageaggregation + + return u +} + +// Interface implementation for LinearMovingAverageAggregation in MovingAverageAggregation union +func (u *_linearMovingAverageAggregation) MovingAverageAggregationCaster() *types.MovingAverageAggregation { + t := types.MovingAverageAggregation(u.v) + return &t +} + +func (u *_movingAverageAggregation) SimpleMovingAverageAggregation(simplemovingaverageaggregation types.SimpleMovingAverageAggregationVariant) *_movingAverageAggregation { + + u.v = &simplemovingaverageaggregation + + return u +} + +// Interface implementation for SimpleMovingAverageAggregation in MovingAverageAggregation union +func (u *_simpleMovingAverageAggregation) 
MovingAverageAggregationCaster() *types.MovingAverageAggregation { + t := types.MovingAverageAggregation(u.v) + return &t +} + +func (u *_movingAverageAggregation) EwmaMovingAverageAggregation(ewmamovingaverageaggregation types.EwmaMovingAverageAggregationVariant) *_movingAverageAggregation { + + u.v = &ewmamovingaverageaggregation + + return u +} + +// Interface implementation for EwmaMovingAverageAggregation in MovingAverageAggregation union +func (u *_ewmaMovingAverageAggregation) MovingAverageAggregationCaster() *types.MovingAverageAggregation { + t := types.MovingAverageAggregation(u.v) + return &t +} + +func (u *_movingAverageAggregation) HoltMovingAverageAggregation(holtmovingaverageaggregation types.HoltMovingAverageAggregationVariant) *_movingAverageAggregation { + + u.v = &holtmovingaverageaggregation + + return u +} + +// Interface implementation for HoltMovingAverageAggregation in MovingAverageAggregation union +func (u *_holtMovingAverageAggregation) MovingAverageAggregationCaster() *types.MovingAverageAggregation { + t := types.MovingAverageAggregation(u.v) + return &t +} + +func (u *_movingAverageAggregation) HoltWintersMovingAverageAggregation(holtwintersmovingaverageaggregation types.HoltWintersMovingAverageAggregationVariant) *_movingAverageAggregation { + + u.v = &holtwintersmovingaverageaggregation + + return u +} + +// Interface implementation for HoltWintersMovingAverageAggregation in MovingAverageAggregation union +func (u *_holtWintersMovingAverageAggregation) MovingAverageAggregationCaster() *types.MovingAverageAggregation { + t := types.MovingAverageAggregation(u.v) + return &t +} + +func (u *_movingAverageAggregation) MovingAverageAggregationCaster() *types.MovingAverageAggregation { + return &u.v +} diff --git a/typedapi/esdsl/movingfunctionaggregation.go b/typedapi/esdsl/movingfunctionaggregation.go new file mode 100644 index 0000000000..f3cdcc12ef --- /dev/null +++ b/typedapi/esdsl/movingfunctionaggregation.go @@ -0,0 +1,104 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _movingFunctionAggregation struct { + v *types.MovingFunctionAggregation +} + +// Given an ordered series of data, "slides" a window across the data and runs a +// custom script on each window of data. +// For convenience, a number of common functions are predefined such as `min`, +// `max`, and moving averages. +func NewMovingFunctionAggregation() *_movingFunctionAggregation { + + return &_movingFunctionAggregation{v: types.NewMovingFunctionAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_movingFunctionAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_movingFunctionAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. 
+// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_movingFunctionAggregation) Format(format string) *_movingFunctionAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_movingFunctionAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_movingFunctionAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The script that should be executed on each window of data. +func (s *_movingFunctionAggregation) Script(script string) *_movingFunctionAggregation { + + s.v.Script = &script + + return s +} + +// By default, the window consists of the last n values excluding the current +// bucket. +// Increasing `shift` by 1, moves the starting window position by 1 to the +// right. +func (s *_movingFunctionAggregation) Shift(shift int) *_movingFunctionAggregation { + + s.v.Shift = &shift + + return s +} + +// The size of window to "slide" across the histogram. +func (s *_movingFunctionAggregation) Window(window int) *_movingFunctionAggregation { + + s.v.Window = &window + + return s +} + +func (s *_movingFunctionAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingFn = s.v + + return container +} + +func (s *_movingFunctionAggregation) MovingFunctionAggregationCaster() *types.MovingFunctionAggregation { + return s.v +} diff --git a/typedapi/esdsl/movingpercentilesaggregation.go b/typedapi/esdsl/movingpercentilesaggregation.go new file mode 100644 index 0000000000..4abb4c8c38 --- /dev/null +++ b/typedapi/esdsl/movingpercentilesaggregation.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _movingPercentilesAggregation struct { + v *types.MovingPercentilesAggregation +} + +// Given an ordered series of percentiles, "slides" a window across those +// percentiles and computes cumulative percentiles. +func NewMovingPercentilesAggregation() *_movingPercentilesAggregation { + + return &_movingPercentilesAggregation{v: types.NewMovingPercentilesAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_movingPercentilesAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_movingPercentilesAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_movingPercentilesAggregation) Format(format string) *_movingPercentilesAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_movingPercentilesAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_movingPercentilesAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_movingPercentilesAggregation) Keyed(keyed bool) *_movingPercentilesAggregation { + + s.v.Keyed = &keyed + + return s +} + +// By default, the window consists of the last n values excluding the current +// bucket. +// Increasing `shift` by 1, moves the starting window position by 1 to the +// right. +func (s *_movingPercentilesAggregation) Shift(shift int) *_movingPercentilesAggregation { + + s.v.Shift = &shift + + return s +} + +// The size of window to "slide" across the histogram. +func (s *_movingPercentilesAggregation) Window(window int) *_movingPercentilesAggregation { + + s.v.Window = &window + + return s +} + +func (s *_movingPercentilesAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingPercentiles = s.v + + return container +} + +func (s *_movingPercentilesAggregation) MovingPercentilesAggregationCaster() *types.MovingPercentilesAggregation { + return s.v +} diff --git a/typedapi/esdsl/msearchrequestitem.go b/typedapi/esdsl/msearchrequestitem.go new file mode 100644 index 0000000000..eb641f9329 --- /dev/null +++ b/typedapi/esdsl/msearchrequestitem.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _msearchRequestItem struct { + v types.MsearchRequestItem +} + +func NewMsearchRequestItem() *_msearchRequestItem { + return &_msearchRequestItem{v: nil} +} + +func (u *_msearchRequestItem) MultisearchHeader(multisearchheader types.MultisearchHeaderVariant) *_msearchRequestItem { + + u.v = &multisearchheader + + return u +} + +// Interface implementation for MultisearchHeader in MsearchRequestItem union +func (u *_multisearchHeader) MsearchRequestItemCaster() *types.MsearchRequestItem { + t := types.MsearchRequestItem(u.v) + return &t +} + +func (u *_msearchRequestItem) MultisearchBody(multisearchbody types.MultisearchBodyVariant) *_msearchRequestItem { + + u.v = &multisearchbody + + return u +} + +// Interface implementation for MultisearchBody in MsearchRequestItem union +func (u *_multisearchBody) MsearchRequestItemCaster() *types.MsearchRequestItem { + t := types.MsearchRequestItem(u.v) + return &t +} + +func (u *_msearchRequestItem) MsearchRequestItemCaster() *types.MsearchRequestItem { + return &u.v +} diff --git a/typedapi/esdsl/mtermvectorsoperation.go b/typedapi/esdsl/mtermvectorsoperation.go new file mode 100644 index 0000000000..6cd44c1d27 --- /dev/null +++ b/typedapi/esdsl/mtermvectorsoperation.go @@ -0,0 
+1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _mTermVectorsOperation struct { + v *types.MTermVectorsOperation +} + +func NewMTermVectorsOperation() *_mTermVectorsOperation { + + return &_mTermVectorsOperation{v: types.NewMTermVectorsOperation()} + +} + +// An artificial document (a document not present in the index) for which you +// want to retrieve term vectors. +func (s *_mTermVectorsOperation) Doc(doc json.RawMessage) *_mTermVectorsOperation { + + s.v.Doc = doc + + return s +} + +// If `true`, the response includes the document count, sum of document +// frequencies, and sum of total term frequencies. 
+func (s *_mTermVectorsOperation) FieldStatistics(fieldstatistics bool) *_mTermVectorsOperation { + + s.v.FieldStatistics = &fieldstatistics + + return s +} + +// Comma-separated list or wildcard expressions of fields to include in the +// statistics. +// Used as the default list unless a specific field list is provided in the +// `completion_fields` or `fielddata_fields` parameters. +func (s *_mTermVectorsOperation) Fields(fields ...string) *_mTermVectorsOperation { + + s.v.Fields = fields + + return s +} + +// Filter terms based on their tf-idf scores. +func (s *_mTermVectorsOperation) Filter(filter types.TermVectorsFilterVariant) *_mTermVectorsOperation { + + s.v.Filter = filter.TermVectorsFilterCaster() + + return s +} + +// The ID of the document. +func (s *_mTermVectorsOperation) Id_(id string) *_mTermVectorsOperation { + + s.v.Id_ = &id + + return s +} + +// The index of the document. +func (s *_mTermVectorsOperation) Index_(indexname string) *_mTermVectorsOperation { + + s.v.Index_ = &indexname + + return s +} + +// If `true`, the response includes term offsets. +func (s *_mTermVectorsOperation) Offsets(offsets bool) *_mTermVectorsOperation { + + s.v.Offsets = &offsets + + return s +} + +// If `true`, the response includes term payloads. +func (s *_mTermVectorsOperation) Payloads(payloads bool) *_mTermVectorsOperation { + + s.v.Payloads = &payloads + + return s +} + +// If `true`, the response includes term positions. +func (s *_mTermVectorsOperation) Positions(positions bool) *_mTermVectorsOperation { + + s.v.Positions = &positions + + return s +} + +// Custom value used to route operations to a specific shard. +func (s *_mTermVectorsOperation) Routing(routing string) *_mTermVectorsOperation { + + s.v.Routing = &routing + + return s +} + +// If true, the response includes term frequency and document frequency. 
+func (s *_mTermVectorsOperation) TermStatistics(termstatistics bool) *_mTermVectorsOperation { + + s.v.TermStatistics = &termstatistics + + return s +} + +// If `true`, returns the document version as part of a hit. +func (s *_mTermVectorsOperation) Version(versionnumber int64) *_mTermVectorsOperation { + + s.v.Version = &versionnumber + + return s +} + +// Specific version type. +func (s *_mTermVectorsOperation) VersionType(versiontype versiontype.VersionType) *_mTermVectorsOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_mTermVectorsOperation) MTermVectorsOperationCaster() *types.MTermVectorsOperation { + return s.v +} diff --git a/typedapi/esdsl/multimatchquery.go b/typedapi/esdsl/multimatchquery.go new file mode 100644 index 0000000000..6b242a7348 --- /dev/null +++ b/typedapi/esdsl/multimatchquery.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" +) + +type _multiMatchQuery struct { + v *types.MultiMatchQuery +} + +// Enables you to search for a provided text, number, date or boolean value +// across multiple fields. +// The provided text is analyzed before matching. +func NewMultiMatchQuery(query string) *_multiMatchQuery { + + tmp := &_multiMatchQuery{v: types.NewMultiMatchQuery()} + + tmp.Query(query) + + return tmp + +} + +// Analyzer used to convert the text in the query value into tokens. +func (s *_multiMatchQuery) Analyzer(analyzer string) *_multiMatchQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// If `true`, match phrase queries are automatically created for multi-term +// synonyms. +func (s *_multiMatchQuery) AutoGenerateSynonymsPhraseQuery(autogeneratesynonymsphrasequery bool) *_multiMatchQuery { + + s.v.AutoGenerateSynonymsPhraseQuery = &autogeneratesynonymsphrasequery + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_multiMatchQuery) Boost(boost float32) *_multiMatchQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_multiMatchQuery) CutoffFrequency(cutofffrequency types.Float64) *_multiMatchQuery { + + s.v.CutoffFrequency = &cutofffrequency + + return s +} + +// The fields to be queried. +// Defaults to the `index.query.default_field` index settings, which in turn +// defaults to `*`. 
+func (s *_multiMatchQuery) Fields(fields ...string) *_multiMatchQuery { + + s.v.Fields = fields + + return s +} + +// Maximum edit distance allowed for matching. +func (s *_multiMatchQuery) Fuzziness(fuzziness types.FuzzinessVariant) *_multiMatchQuery { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Method used to rewrite the query. +func (s *_multiMatchQuery) FuzzyRewrite(multitermqueryrewrite string) *_multiMatchQuery { + + s.v.FuzzyRewrite = &multitermqueryrewrite + + return s +} + +// If `true`, edits for fuzzy matching include transpositions of two adjacent +// characters (for example, `ab` to `ba`). +// Can be applied to the term subqueries constructed for all terms but the final +// term. +func (s *_multiMatchQuery) FuzzyTranspositions(fuzzytranspositions bool) *_multiMatchQuery { + + s.v.FuzzyTranspositions = &fuzzytranspositions + + return s +} + +// If `true`, format-based errors, such as providing a text query value for a +// numeric field, are ignored. +func (s *_multiMatchQuery) Lenient(lenient bool) *_multiMatchQuery { + + s.v.Lenient = &lenient + + return s +} + +// Maximum number of terms to which the query will expand. +func (s *_multiMatchQuery) MaxExpansions(maxexpansions int) *_multiMatchQuery { + + s.v.MaxExpansions = &maxexpansions + + return s +} + +// Minimum number of clauses that must match for a document to be returned. +func (s *_multiMatchQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_multiMatchQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Boolean logic used to interpret text in the query value. +func (s *_multiMatchQuery) Operator(operator operator.Operator) *_multiMatchQuery { + + s.v.Operator = &operator + return s +} + +// Number of beginning characters left unchanged for fuzzy matching. 
+func (s *_multiMatchQuery) PrefixLength(prefixlength int) *_multiMatchQuery { + + s.v.PrefixLength = &prefixlength + + return s +} + +// Text, number, boolean value or date you wish to find in the provided field. +func (s *_multiMatchQuery) Query(query string) *_multiMatchQuery { + + s.v.Query = query + + return s +} + +func (s *_multiMatchQuery) QueryName_(queryname_ string) *_multiMatchQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Maximum number of positions allowed between matching tokens. +func (s *_multiMatchQuery) Slop(slop int) *_multiMatchQuery { + + s.v.Slop = &slop + + return s +} + +// Determines how scores for each per-term blended query and scores across +// groups are combined. +func (s *_multiMatchQuery) TieBreaker(tiebreaker types.Float64) *_multiMatchQuery { + + s.v.TieBreaker = &tiebreaker + + return s +} + +// How `the` multi_match query is executed internally. +func (s *_multiMatchQuery) Type(type_ textquerytype.TextQueryType) *_multiMatchQuery { + + s.v.Type = &type_ + return s +} + +// Indicates whether no documents are returned if the `analyzer` removes all +// tokens, such as when using a `stop` filter. +func (s *_multiMatchQuery) ZeroTermsQuery(zerotermsquery zerotermsquery.ZeroTermsQuery) *_multiMatchQuery { + + s.v.ZeroTermsQuery = &zerotermsquery + return s +} + +func (s *_multiMatchQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.MultiMatch = s.v + + return container +} + +func (s *_multiMatchQuery) MultiMatchQueryCaster() *types.MultiMatchQuery { + return s.v +} diff --git a/typedapi/esdsl/multiplexertokenfilter.go b/typedapi/esdsl/multiplexertokenfilter.go new file mode 100644 index 0000000000..135671aeb3 --- /dev/null +++ b/typedapi/esdsl/multiplexertokenfilter.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _multiplexerTokenFilter struct { + v *types.MultiplexerTokenFilter +} + +func NewMultiplexerTokenFilter() *_multiplexerTokenFilter { + + return &_multiplexerTokenFilter{v: types.NewMultiplexerTokenFilter()} + +} + +func (s *_multiplexerTokenFilter) Filters(filters ...string) *_multiplexerTokenFilter { + + for _, v := range filters { + + s.v.Filters = append(s.v.Filters, v) + + } + return s +} + +func (s *_multiplexerTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_multiplexerTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_multiplexerTokenFilter) Version(versionstring string) *_multiplexerTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_multiplexerTokenFilter) MultiplexerTokenFilterCaster() *types.MultiplexerTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/multisearchbody.go b/typedapi/esdsl/multisearchbody.go new file mode 100644 index 0000000000..82e06a85ad --- /dev/null +++ b/typedapi/esdsl/multisearchbody.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _multisearchBody struct { + v *types.MultisearchBody +} + +func NewMultisearchBody() *_multisearchBody { + + return &_multisearchBody{v: types.NewMultisearchBody()} + +} + +func (s *_multisearchBody) Aggregations(aggregations map[string]types.Aggregations) *_multisearchBody { + + s.v.Aggregations = aggregations + return s +} + +func (s *_multisearchBody) AddAggregation(key string, value types.AggregationsVariant) *_multisearchBody { + + var tmp map[string]types.Aggregations + if s.v.Aggregations == nil { + s.v.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = s.v.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + s.v.Aggregations = tmp + return s +} + +func (s *_multisearchBody) Collapse(collapse types.FieldCollapseVariant) *_multisearchBody { + + s.v.Collapse = collapse.FieldCollapseCaster() + + return s +} + +// Array of wildcard (*) patterns. 
The request returns doc values for field +// names matching these patterns in the hits.fields property of the response. +func (s *_multisearchBody) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *_multisearchBody { + + for _, v := range docvaluefields { + + s.v.DocvalueFields = append(s.v.DocvalueFields, *v.FieldAndFormatCaster()) + + } + return s +} + +// If true, returns detailed information about score computation as part of a +// hit. +func (s *_multisearchBody) Explain(explain bool) *_multisearchBody { + + s.v.Explain = &explain + + return s +} + +// Configuration of search extensions defined by Elasticsearch plugins. +func (s *_multisearchBody) Ext(ext map[string]json.RawMessage) *_multisearchBody { + + s.v.Ext = ext + return s +} + +func (s *_multisearchBody) AddExt(key string, value json.RawMessage) *_multisearchBody { + + var tmp map[string]json.RawMessage + if s.v.Ext == nil { + s.v.Ext = make(map[string]json.RawMessage) + } else { + tmp = s.v.Ext + } + + tmp[key] = value + + s.v.Ext = tmp + return s +} + +// Array of wildcard (*) patterns. The request returns values for field names +// matching these patterns in the hits.fields property of the response. +func (s *_multisearchBody) Fields(fields ...types.FieldAndFormatVariant) *_multisearchBody { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, *v.FieldAndFormatCaster()) + + } + return s +} + +// Starting document offset. By default, you cannot page through more than +// 10,000 +// hits using the from and size parameters. To page through more hits, use the +// search_after parameter. +func (s *_multisearchBody) From(from int) *_multisearchBody { + + s.v.From = &from + + return s +} + +func (s *_multisearchBody) Highlight(highlight types.HighlightVariant) *_multisearchBody { + + s.v.Highlight = highlight.HighlightCaster() + + return s +} + +// Boosts the _score of documents from specified indices. 
+func (s *_multisearchBody) IndicesBoost(indicesboost []map[string]types.Float64) *_multisearchBody { + + s.v.IndicesBoost = indicesboost + + return s +} + +// Defines the approximate kNN search to run. +func (s *_multisearchBody) Knn(knns ...types.KnnSearchVariant) *_multisearchBody { + + s.v.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + s.v.Knn[i] = *v.KnnSearchCaster() + } + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are +// not included in the search results. +func (s *_multisearchBody) MinScore(minscore types.Float64) *_multisearchBody { + + s.v.MinScore = &minscore + + return s +} + +// Limits the search to a point in time (PIT). If you provide a PIT, you +// cannot specify an in the request path. +func (s *_multisearchBody) Pit(pit types.PointInTimeReferenceVariant) *_multisearchBody { + + s.v.Pit = pit.PointInTimeReferenceCaster() + + return s +} + +func (s *_multisearchBody) PostFilter(postfilter types.QueryVariant) *_multisearchBody { + + s.v.PostFilter = postfilter.QueryCaster() + + return s +} + +func (s *_multisearchBody) Profile(profile bool) *_multisearchBody { + + s.v.Profile = &profile + + return s +} + +// Defines the search definition using the Query DSL. +func (s *_multisearchBody) Query(query types.QueryVariant) *_multisearchBody { + + s.v.Query = query.QueryCaster() + + return s +} + +func (s *_multisearchBody) Rescore(rescores ...types.RescoreVariant) *_multisearchBody { + + s.v.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + s.v.Rescore[i] = *v.RescoreCaster() + } + + return s +} + +// Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. 
+func (s *_multisearchBody) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *_multisearchBody { + + s.v.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return s +} + +// Retrieve a script evaluation (based on different fields) for each hit. +func (s *_multisearchBody) ScriptFields(scriptfields map[string]types.ScriptField) *_multisearchBody { + + s.v.ScriptFields = scriptfields + return s +} + +func (s *_multisearchBody) AddScriptField(key string, value types.ScriptFieldVariant) *_multisearchBody { + + var tmp map[string]types.ScriptField + if s.v.ScriptFields == nil { + s.v.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = s.v.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + s.v.ScriptFields = tmp + return s +} + +func (s *_multisearchBody) SearchAfter(sortresults ...types.FieldValueVariant) *_multisearchBody { + + for _, v := range sortresults { + s.v.SearchAfter = append(s.v.SearchAfter, *v.FieldValueCaster()) + } + + return s +} + +// If true, returns sequence number and primary term of the last modification +// of each hit. See Optimistic concurrency control. +func (s *_multisearchBody) SeqNoPrimaryTerm(seqnoprimaryterm bool) *_multisearchBody { + + s.v.SeqNoPrimaryTerm = &seqnoprimaryterm + + return s +} + +// The number of hits to return. By default, you cannot page through more +// than 10,000 hits using the from and size parameters. To page through more +// hits, use the search_after parameter. +func (s *_multisearchBody) Size(size int) *_multisearchBody { + + s.v.Size = &size + + return s +} + +func (s *_multisearchBody) Sort(sorts ...types.SortCombinationsVariant) *_multisearchBody { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +// Indicates which source fields are returned for matching documents. These +// fields are returned in the hits._source property of the search response. 
+func (s *_multisearchBody) Source_(sourceconfig types.SourceConfigVariant) *_multisearchBody { + + s.v.Source_ = *sourceconfig.SourceConfigCaster() + + return s +} + +// Stats groups to associate with the search. Each group maintains a statistics +// aggregation for its associated searches. You can retrieve these stats using +// the indices stats API. +func (s *_multisearchBody) Stats(stats ...string) *_multisearchBody { + + for _, v := range stats { + + s.v.Stats = append(s.v.Stats, v) + + } + return s +} + +// List of stored fields to return as part of a hit. If no fields are specified, +// no stored fields are included in the response. If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. +func (s *_multisearchBody) StoredFields(fields ...string) *_multisearchBody { + + s.v.StoredFields = fields + + return s +} + +func (s *_multisearchBody) Suggest(suggest types.SuggesterVariant) *_multisearchBody { + + s.v.Suggest = suggest.SuggesterCaster() + + return s +} + +// Maximum number of documents to collect for each shard. If a query reaches +// this +// limit, Elasticsearch terminates the query early. Elasticsearch collects +// documents +// before sorting. Defaults to 0, which does not terminate query execution +// early. +func (s *_multisearchBody) TerminateAfter(terminateafter int64) *_multisearchBody { + + s.v.TerminateAfter = &terminateafter + + return s +} + +// Specifies the period of time to wait for a response from each shard. If no +// response +// is received before the timeout expires, the request fails and returns an +// error. +// Defaults to no timeout. +func (s *_multisearchBody) Timeout(timeout string) *_multisearchBody { + + s.v.Timeout = &timeout + + return s +} + +// If true, calculate and return document scores, even if the scores are not +// used for sorting. 
+func (s *_multisearchBody) TrackScores(trackscores bool) *_multisearchBody { + + s.v.TrackScores = &trackscores + + return s +} + +// Number of hits matching the query to count accurately. If true, the exact +// number of hits is returned at the cost of some performance. If false, the +// response does not include the total number of hits matching the query. +// Defaults to 10,000 hits. +func (s *_multisearchBody) TrackTotalHits(trackhits types.TrackHitsVariant) *_multisearchBody { + + s.v.TrackTotalHits = *trackhits.TrackHitsCaster() + + return s +} + +// If true, returns document version as part of a hit. +func (s *_multisearchBody) Version(version bool) *_multisearchBody { + + s.v.Version = &version + + return s +} + +func (s *_multisearchBody) MultisearchBodyCaster() *types.MultisearchBody { + return s.v +} diff --git a/typedapi/esdsl/multisearchheader.go b/typedapi/esdsl/multisearchheader.go new file mode 100644 index 0000000000..b26a2c2804 --- /dev/null +++ b/typedapi/esdsl/multisearchheader.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +type _multisearchHeader struct { + v *types.MultisearchHeader +} + +func NewMultisearchHeader() *_multisearchHeader { + + return &_multisearchHeader{v: types.NewMultisearchHeader()} + +} + +func (s *_multisearchHeader) AllowNoIndices(allownoindices bool) *_multisearchHeader { + + s.v.AllowNoIndices = &allownoindices + + return s +} + +func (s *_multisearchHeader) AllowPartialSearchResults(allowpartialsearchresults bool) *_multisearchHeader { + + s.v.AllowPartialSearchResults = &allowpartialsearchresults + + return s +} + +func (s *_multisearchHeader) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *_multisearchHeader { + + s.v.CcsMinimizeRoundtrips = &ccsminimizeroundtrips + + return s +} + +func (s *_multisearchHeader) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *_multisearchHeader { + + s.v.ExpandWildcards = expandwildcards + + return s +} + +func (s *_multisearchHeader) IgnoreThrottled(ignorethrottled bool) *_multisearchHeader { + + s.v.IgnoreThrottled = &ignorethrottled + + return s +} + +func (s *_multisearchHeader) IgnoreUnavailable(ignoreunavailable bool) *_multisearchHeader { + + s.v.IgnoreUnavailable = &ignoreunavailable + + return s +} + +func (s *_multisearchHeader) Index(indices ...string) *_multisearchHeader { + + s.v.Index = indices + + return s +} + +func (s *_multisearchHeader) Preference(preference string) *_multisearchHeader { + + s.v.Preference = &preference + + return s +} + +func (s *_multisearchHeader) RequestCache(requestcache bool) *_multisearchHeader { + + s.v.RequestCache = &requestcache + + return s +} + +func (s *_multisearchHeader) Routing(routing string) 
*_multisearchHeader { + + s.v.Routing = &routing + + return s +} + +func (s *_multisearchHeader) SearchType(searchtype searchtype.SearchType) *_multisearchHeader { + + s.v.SearchType = &searchtype + return s +} + +func (s *_multisearchHeader) MultisearchHeaderCaster() *types.MultisearchHeader { + return s.v +} diff --git a/typedapi/esdsl/multitermlookup.go b/typedapi/esdsl/multitermlookup.go new file mode 100644 index 0000000000..eb27e03223 --- /dev/null +++ b/typedapi/esdsl/multitermlookup.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _multiTermLookup struct { + v *types.MultiTermLookup +} + +func NewMultiTermLookup() *_multiTermLookup { + + return &_multiTermLookup{v: types.NewMultiTermLookup()} + +} + +// A fields from which to retrieve terms. +func (s *_multiTermLookup) Field(field string) *_multiTermLookup { + + s.v.Field = field + + return s +} + +// The value to apply to documents that do not have a value. 
+// By default, documents without a value are ignored. +func (s *_multiTermLookup) Missing(missing types.MissingVariant) *_multiTermLookup { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_multiTermLookup) MultiTermLookupCaster() *types.MultiTermLookup { + return s.v +} diff --git a/typedapi/esdsl/multitermsaggregation.go b/typedapi/esdsl/multitermsaggregation.go new file mode 100644 index 0000000000..5fcc7d5e14 --- /dev/null +++ b/typedapi/esdsl/multitermsaggregation.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" +) + +type _multiTermsAggregation struct { + v *types.MultiTermsAggregation +} + +// A multi-bucket value source based aggregation where buckets are dynamically +// built - one per unique set of values. 
+func NewMultiTermsAggregation() *_multiTermsAggregation { + + return &_multiTermsAggregation{v: types.NewMultiTermsAggregation()} + +} + +// Specifies the strategy for data collection. +func (s *_multiTermsAggregation) CollectMode(collectmode termsaggregationcollectmode.TermsAggregationCollectMode) *_multiTermsAggregation { + + s.v.CollectMode = &collectmode + return s +} + +// The minimum number of documents in a bucket for it to be returned. +func (s *_multiTermsAggregation) MinDocCount(mindoccount int64) *_multiTermsAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// Specifies the sort order of the buckets. +// Defaults to sorting by descending document count. +func (s *_multiTermsAggregation) Order(aggregateorder types.AggregateOrderVariant) *_multiTermsAggregation { + + s.v.Order = *aggregateorder.AggregateOrderCaster() + + return s +} + +// The minimum number of documents in a bucket on each shard for it to be +// returned. +func (s *_multiTermsAggregation) ShardMinDocCount(shardmindoccount int64) *_multiTermsAggregation { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// The number of candidate terms produced by each shard. +// By default, `shard_size` will be automatically estimated based on the number +// of shards and the `size` parameter. +func (s *_multiTermsAggregation) ShardSize(shardsize int) *_multiTermsAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// Calculates the doc count error on per term basis. +func (s *_multiTermsAggregation) ShowTermDocCountError(showtermdoccounterror bool) *_multiTermsAggregation { + + s.v.ShowTermDocCountError = &showtermdoccounterror + + return s +} + +// The number of term buckets should be returned out of the overall terms list. +func (s *_multiTermsAggregation) Size(size int) *_multiTermsAggregation { + + s.v.Size = &size + + return s +} + +// The field from which to generate sets of terms. 
+func (s *_multiTermsAggregation) Terms(terms ...types.MultiTermLookupVariant) *_multiTermsAggregation { + + for _, v := range terms { + + s.v.Terms = append(s.v.Terms, *v.MultiTermLookupCaster()) + + } + return s +} + +func (s *_multiTermsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MultiTerms = s.v + + return container +} + +func (s *_multiTermsAggregation) MultiTermsAggregationCaster() *types.MultiTermsAggregation { + return s.v +} diff --git a/typedapi/esdsl/murmur3hashproperty.go b/typedapi/esdsl/murmur3hashproperty.go new file mode 100644 index 0000000000..b654f579cf --- /dev/null +++ b/typedapi/esdsl/murmur3hashproperty.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _murmur3HashProperty struct { + v *types.Murmur3HashProperty +} + +func NewMurmur3HashProperty() *_murmur3HashProperty { + + return &_murmur3HashProperty{v: types.NewMurmur3HashProperty()} + +} + +func (s *_murmur3HashProperty) CopyTo(fields ...string) *_murmur3HashProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_murmur3HashProperty) DocValues(docvalues bool) *_murmur3HashProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_murmur3HashProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_murmur3HashProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_murmur3HashProperty) Fields(fields map[string]types.Property) *_murmur3HashProperty { + + s.v.Fields = fields + return s +} + +func (s *_murmur3HashProperty) AddField(key string, value types.PropertyVariant) *_murmur3HashProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_murmur3HashProperty) IgnoreAbove(ignoreabove int) *_murmur3HashProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_murmur3HashProperty) Meta(meta map[string]string) *_murmur3HashProperty { + + s.v.Meta = meta + return s +} + +func (s *_murmur3HashProperty) AddMeta(key string, value string) *_murmur3HashProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_murmur3HashProperty) Properties(properties map[string]types.Property) *_murmur3HashProperty { + + s.v.Properties = properties + return s +} + +func (s *_murmur3HashProperty) AddProperty(key string, value types.PropertyVariant) *_murmur3HashProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_murmur3HashProperty) Store(store bool) *_murmur3HashProperty { + + s.v.Store = &store + + return s +} + +func (s *_murmur3HashProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_murmur3HashProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_murmur3HashProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_murmur3HashProperty) Murmur3HashPropertyCaster() *types.Murmur3HashProperty { + return s.v +} diff --git a/typedapi/esdsl/mutualinformationheuristic.go b/typedapi/esdsl/mutualinformationheuristic.go new file mode 100644 index 0000000000..57b06a3fbf --- /dev/null +++ b/typedapi/esdsl/mutualinformationheuristic.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _mutualInformationHeuristic struct { + v *types.MutualInformationHeuristic +} + +func NewMutualInformationHeuristic() *_mutualInformationHeuristic { + + return &_mutualInformationHeuristic{v: types.NewMutualInformationHeuristic()} + +} + +// Set to `false` if you defined a custom background filter that represents a +// different set of documents that you want to compare to. +func (s *_mutualInformationHeuristic) BackgroundIsSuperset(backgroundissuperset bool) *_mutualInformationHeuristic { + + s.v.BackgroundIsSuperset = &backgroundissuperset + + return s +} + +// Set to `false` to filter out the terms that appear less often in the subset +// than in documents outside the subset. 
+func (s *_mutualInformationHeuristic) IncludeNegatives(includenegatives bool) *_mutualInformationHeuristic { + + s.v.IncludeNegatives = &includenegatives + + return s +} + +func (s *_mutualInformationHeuristic) MutualInformationHeuristicCaster() *types.MutualInformationHeuristic { + return s.v +} diff --git a/typedapi/esdsl/names.go b/typedapi/esdsl/names.go new file mode 100644 index 0000000000..d0d92e9f19 --- /dev/null +++ b/typedapi/esdsl/names.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. 
+type _names struct { + v types.Names +} + +func NewNames() *_names { + return &_names{v: []string{}} +} + +func (u *_names) NamesCaster() *types.Names { + return &u.v +} diff --git a/typedapi/esdsl/nerinferenceoptions.go b/typedapi/esdsl/nerinferenceoptions.go new file mode 100644 index 0000000000..c1ce4f4f4f --- /dev/null +++ b/typedapi/esdsl/nerinferenceoptions.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _nerInferenceOptions struct { + v *types.NerInferenceOptions +} + +// Named entity recognition configuration for inference. +func NewNerInferenceOptions() *_nerInferenceOptions { + + return &_nerInferenceOptions{v: types.NewNerInferenceOptions()} + +} + +// The token classification labels. 
Must be IOB formatted tags +func (s *_nerInferenceOptions) ClassificationLabels(classificationlabels ...string) *_nerInferenceOptions { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. +func (s *_nerInferenceOptions) ResultsField(resultsfield string) *_nerInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options +func (s *_nerInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_nerInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_nerInferenceOptions) Vocabulary(vocabulary types.VocabularyVariant) *_nerInferenceOptions { + + s.v.Vocabulary = vocabulary.VocabularyCaster() + + return s +} + +func (s *_nerInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.Ner = s.v + + return container +} + +func (s *_nerInferenceOptions) NerInferenceOptionsCaster() *types.NerInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/nerinferenceupdateoptions.go b/typedapi/esdsl/nerinferenceupdateoptions.go new file mode 100644 index 0000000000..ba0dd3c8c6 --- /dev/null +++ b/typedapi/esdsl/nerinferenceupdateoptions.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _nerInferenceUpdateOptions struct {
	v *types.NerInferenceUpdateOptions
}

// Named entity recognition configuration for inference.
func NewNerInferenceUpdateOptions() *_nerInferenceUpdateOptions {

	return &_nerInferenceUpdateOptions{v: types.NewNerInferenceUpdateOptions()}

}

// The field that is added to incoming documents to contain the inference
// prediction. Defaults to predicted_value.
func (s *_nerInferenceUpdateOptions) ResultsField(resultsfield string) *_nerInferenceUpdateOptions {

	s.v.ResultsField = &resultsfield

	return s
}

// The tokenization options to update when inferring
func (s *_nerInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_nerInferenceUpdateOptions {

	s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster()

	return s
}

// InferenceConfigUpdateContainerCaster wraps these options in an
// InferenceConfigUpdateContainer under its `ner` variant.
func (s *_nerInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer {
	container := types.NewInferenceConfigUpdateContainer()

	container.Ner = s.v

	return container
}

// NerInferenceUpdateOptionsCaster returns the underlying NerInferenceUpdateOptions value.
func (s *_nerInferenceUpdateOptions) NerInferenceUpdateOptionsCaster() *types.NerInferenceUpdateOptions {
	return s.v
}
diff --git a/typedapi/esdsl/nestedaggregation.go b/typedapi/esdsl/nestedaggregation.go
new file mode 100644
index 0000000000..ca919bddb2
--- /dev/null
+++ b/typedapi/esdsl/nestedaggregation.go
@@ -0,0 +1,55 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _nestedAggregation struct {
	v *types.NestedAggregation
}

// A special single bucket aggregation that enables aggregating nested
// documents.
func NewNestedAggregation() *_nestedAggregation {

	return &_nestedAggregation{v: types.NewNestedAggregation()}

}

// The path to the field of type `nested`.
func (s *_nestedAggregation) Path(field string) *_nestedAggregation {

	s.v.Path = &field

	return s
}

// AggregationsCaster wraps this aggregation in an Aggregations container
// under its `nested` variant.
func (s *_nestedAggregation) AggregationsCaster() *types.Aggregations {
	container := types.NewAggregations()

	container.Nested = s.v

	return container
}

// NestedAggregationCaster returns the underlying NestedAggregation value.
func (s *_nestedAggregation) NestedAggregationCaster() *types.NestedAggregation {
	return s.v
}
diff --git a/typedapi/esdsl/nestedproperty.go b/typedapi/esdsl/nestedproperty.go
new file mode 100644
index 0000000000..f379a84cb2
--- /dev/null
+++ b/typedapi/esdsl/nestedproperty.go
@@ -0,0 +1,167 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _nestedProperty struct { + v *types.NestedProperty +} + +func NewNestedProperty() *_nestedProperty { + + return &_nestedProperty{v: types.NewNestedProperty()} + +} + +func (s *_nestedProperty) CopyTo(fields ...string) *_nestedProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_nestedProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_nestedProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_nestedProperty) Enabled(enabled bool) *_nestedProperty { + + s.v.Enabled = &enabled + + return s +} + +func (s *_nestedProperty) Fields(fields map[string]types.Property) *_nestedProperty { + + s.v.Fields = fields + return s +} + +func (s *_nestedProperty) AddField(key string, value types.PropertyVariant) *_nestedProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_nestedProperty) IgnoreAbove(ignoreabove int) *_nestedProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_nestedProperty) IncludeInParent(includeinparent bool) *_nestedProperty { + + s.v.IncludeInParent = &includeinparent + + return s +} + +func (s *_nestedProperty) IncludeInRoot(includeinroot bool) *_nestedProperty { + + s.v.IncludeInRoot = &includeinroot + + return s +} + +// Metadata about the field. 
+func (s *_nestedProperty) Meta(meta map[string]string) *_nestedProperty { + + s.v.Meta = meta + return s +} + +func (s *_nestedProperty) AddMeta(key string, value string) *_nestedProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_nestedProperty) Properties(properties map[string]types.Property) *_nestedProperty { + + s.v.Properties = properties + return s +} + +func (s *_nestedProperty) AddProperty(key string, value types.PropertyVariant) *_nestedProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_nestedProperty) Store(store bool) *_nestedProperty { + + s.v.Store = &store + + return s +} + +func (s *_nestedProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_nestedProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_nestedProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_nestedProperty) NestedPropertyCaster() *types.NestedProperty { + return s.v +} diff --git a/typedapi/esdsl/nestedquery.go b/typedapi/esdsl/nestedquery.go new file mode 100644 index 0000000000..4203edefbc --- /dev/null +++ b/typedapi/esdsl/nestedquery.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode"
)

type _nestedQuery struct {
	v *types.NestedQuery
}

// Wraps another query to search nested fields.
// If an object matches the search, the nested query returns the root parent
// document.
func NewNestedQuery(query types.QueryVariant) *_nestedQuery {

	tmp := &_nestedQuery{v: types.NewNestedQuery()}

	tmp.Query(query)

	return tmp

}

// Floating point number used to decrease or increase the relevance scores of
// the query.
// Boost values are relative to the default value of 1.0.
// A boost value between 0 and 1.0 decreases the relevance score.
// A value greater than 1.0 increases the relevance score.
func (s *_nestedQuery) Boost(boost float32) *_nestedQuery {

	s.v.Boost = &boost

	return s
}

// Indicates whether to ignore an unmapped path and not return any documents
// instead of an error.
func (s *_nestedQuery) IgnoreUnmapped(ignoreunmapped bool) *_nestedQuery {

	s.v.IgnoreUnmapped = &ignoreunmapped

	return s
}

// If defined, each search hit will contain inner hits.
func (s *_nestedQuery) InnerHits(innerhits types.InnerHitsVariant) *_nestedQuery {

	s.v.InnerHits = innerhits.InnerHitsCaster()

	return s
}

// Path to the nested object you wish to search.
func (s *_nestedQuery) Path(field string) *_nestedQuery {

	s.v.Path = field

	return s
}

// Query you wish to run on nested objects in the path.
func (s *_nestedQuery) Query(query types.QueryVariant) *_nestedQuery {

	s.v.Query = *query.QueryCaster()

	return s
}

// QueryName_ sets the optional `_name` tag for this query clause.
func (s *_nestedQuery) QueryName_(queryname_ string) *_nestedQuery {

	s.v.QueryName_ = &queryname_

	return s
}

// How scores for matching child objects affect the root parent document’s
// relevance score.
func (s *_nestedQuery) ScoreMode(scoremode childscoremode.ChildScoreMode) *_nestedQuery {

	s.v.ScoreMode = &scoremode
	return s
}

// QueryCaster wraps this query in a Query container under its `nested`
// variant.
func (s *_nestedQuery) QueryCaster() *types.Query {
	container := types.NewQuery()

	container.Nested = s.v

	return container
}

// NestedQueryCaster returns the underlying NestedQuery value.
func (s *_nestedQuery) NestedQueryCaster() *types.NestedQuery {
	return s.v
}
diff --git a/typedapi/esdsl/nestedsortvalue.go b/typedapi/esdsl/nestedsortvalue.go
new file mode 100644
index 0000000000..ae2676d306
--- /dev/null
+++ b/typedapi/esdsl/nestedsortvalue.go
@@ -0,0 +1,65 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
// See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _nestedSortValue struct {
	v *types.NestedSortValue
}

// NewNestedSortValue returns a builder for nested sort options.
func NewNestedSortValue() *_nestedSortValue {

	return &_nestedSortValue{v: types.NewNestedSortValue()}

}

// Filter sets the query used to filter the nested objects considered for
// sorting.
func (s *_nestedSortValue) Filter(filter types.QueryVariant) *_nestedSortValue {

	s.v.Filter = filter.QueryCaster()

	return s
}

// MaxChildren sets the maximum number of children considered per root
// document.
func (s *_nestedSortValue) MaxChildren(maxchildren int) *_nestedSortValue {

	s.v.MaxChildren = &maxchildren

	return s
}

// Nested sets sort options for a deeper level of nesting.
func (s *_nestedSortValue) Nested(nested types.NestedSortValueVariant) *_nestedSortValue {

	s.v.Nested = nested.NestedSortValueCaster()

	return s
}

// Path sets the path to the nested object to sort on.
func (s *_nestedSortValue) Path(field string) *_nestedSortValue {

	s.v.Path = field

	return s
}

// NestedSortValueCaster returns the underlying NestedSortValue value.
func (s *_nestedSortValue) NestedSortValueCaster() *types.NestedSortValue {
	return s.v
}
diff --git a/typedapi/esdsl/networkdirectionprocessor.go b/typedapi/esdsl/networkdirectionprocessor.go
new file mode 100644
index 0000000000..0d82d29861
--- /dev/null
+++ b/typedapi/esdsl/networkdirectionprocessor.go
@@ -0,0 +1,148 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _networkDirectionProcessor struct {
	v *types.NetworkDirectionProcessor
}

// Calculates the network direction given a source IP address, destination IP
// address, and a list of internal networks.
func NewNetworkDirectionProcessor() *_networkDirectionProcessor {

	return &_networkDirectionProcessor{v: types.NewNetworkDirectionProcessor()}

}

// Description of the processor.
// Useful for describing the purpose of the processor or its configuration.
func (s *_networkDirectionProcessor) Description(description string) *_networkDirectionProcessor {

	s.v.Description = &description

	return s
}

// Field containing the destination IP address.
func (s *_networkDirectionProcessor) DestinationIp(field string) *_networkDirectionProcessor {

	s.v.DestinationIp = &field

	return s
}

// Conditionally execute the processor.
func (s *_networkDirectionProcessor) If(if_ types.ScriptVariant) *_networkDirectionProcessor {

	s.v.If = if_.ScriptCaster()

	return s
}

// Ignore failures for the processor.
func (s *_networkDirectionProcessor) IgnoreFailure(ignorefailure bool) *_networkDirectionProcessor {

	s.v.IgnoreFailure = &ignorefailure

	return s
}

// If true and any required fields are missing, the processor quietly exits
// without modifying the document.
func (s *_networkDirectionProcessor) IgnoreMissing(ignoremissing bool) *_networkDirectionProcessor {

	s.v.IgnoreMissing = &ignoremissing

	return s
}

// List of internal networks. Supports IPv4 and IPv6 addresses and ranges in
// CIDR notation. Also supports the named ranges listed below. These may be
// constructed with template snippets. Must specify only one of
// internal_networks or internal_networks_field.
func (s *_networkDirectionProcessor) InternalNetworks(internalnetworks ...string) *_networkDirectionProcessor {

	for _, v := range internalnetworks {

		s.v.InternalNetworks = append(s.v.InternalNetworks, v)

	}
	return s
}

// A field on the given document to read the internal_networks configuration
// from.
func (s *_networkDirectionProcessor) InternalNetworksField(field string) *_networkDirectionProcessor {

	s.v.InternalNetworksField = &field

	return s
}

// Handle failures for the processor.
func (s *_networkDirectionProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_networkDirectionProcessor {

	for _, v := range onfailures {

		s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster())

	}
	return s
}

// Field containing the source IP address.
func (s *_networkDirectionProcessor) SourceIp(field string) *_networkDirectionProcessor {

	s.v.SourceIp = &field

	return s
}

// Identifier for the processor.
// Useful for debugging and metrics.
func (s *_networkDirectionProcessor) Tag(tag string) *_networkDirectionProcessor {

	s.v.Tag = &tag

	return s
}

// Output field for the network direction.
func (s *_networkDirectionProcessor) TargetField(field string) *_networkDirectionProcessor {

	s.v.TargetField = &field

	return s
}

// ProcessorContainerCaster wraps this processor in a ProcessorContainer under
// its `network_direction` variant.
func (s *_networkDirectionProcessor) ProcessorContainerCaster() *types.ProcessorContainer {
	container := types.NewProcessorContainer()

	container.NetworkDirection = s.v

	return container
}

// NetworkDirectionProcessorCaster returns the underlying
// NetworkDirectionProcessor value.
func (s *_networkDirectionProcessor) NetworkDirectionProcessorCaster() *types.NetworkDirectionProcessor {
	return s.v
}
diff --git a/typedapi/esdsl/nevercondition.go b/typedapi/esdsl/nevercondition.go
new file mode 100644
index 0000000000..af16d48bc0
--- /dev/null
+++ b/typedapi/esdsl/nevercondition.go
@@ -0,0 +1,45 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _neverCondition struct {
	v *types.NeverCondition
}

// NewNeverCondition returns a builder for a Watcher condition that never
// triggers.
func NewNeverCondition() *_neverCondition {

	return &_neverCondition{v: types.NewNeverCondition()}

}

// WatcherConditionCaster wraps this condition in a WatcherCondition under its
// `never` variant.
func (s *_neverCondition) WatcherConditionCaster() *types.WatcherCondition {
	container := types.NewWatcherCondition()

	container.Never = s.v

	return container
}

// NeverConditionCaster returns the underlying NeverCondition value.
func (s *_neverCondition) NeverConditionCaster() *types.NeverCondition {
	return s.v
}
diff --git a/typedapi/esdsl/ngramtokenfilter.go b/typedapi/esdsl/ngramtokenfilter.go
new file mode 100644
index 0000000000..bd9277711c
--- /dev/null
+++ b/typedapi/esdsl/ngramtokenfilter.go
@@ -0,0 +1,65 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import "github.com/elastic/go-elasticsearch/v8/typedapi/types"

type _nGramTokenFilter struct {
	v *types.NGramTokenFilter
}

// NewNGramTokenFilter returns a builder for an n-gram token filter.
func NewNGramTokenFilter() *_nGramTokenFilter {

	return &_nGramTokenFilter{v: types.NewNGramTokenFilter()}

}

// MaxGram sets the maximum length of the produced n-grams.
func (s *_nGramTokenFilter) MaxGram(maxgram int) *_nGramTokenFilter {

	s.v.MaxGram = &maxgram

	return s
}

// MinGram sets the minimum length of the produced n-grams.
func (s *_nGramTokenFilter) MinGram(mingram int) *_nGramTokenFilter {

	s.v.MinGram = &mingram

	return s
}

// PreserveOriginal toggles emitting the original token alongside its n-grams.
func (s *_nGramTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_nGramTokenFilter {

	s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster()

	return s
}

// Version sets the version string for the filter definition.
func (s *_nGramTokenFilter) Version(versionstring string) *_nGramTokenFilter {

	s.v.Version = &versionstring

	return s
}

// NGramTokenFilterCaster returns the underlying NGramTokenFilter value.
func (s *_nGramTokenFilter) NGramTokenFilterCaster() *types.NGramTokenFilter {
	return s.v
}
diff --git a/typedapi/esdsl/ngramtokenizer.go b/typedapi/esdsl/ngramtokenizer.go
new file mode 100644
index 0000000000..3b5befcc8d
--- /dev/null
+++ b/typedapi/esdsl/ngramtokenizer.go
@@ -0,0 +1,78 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
// See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar"
)

type _nGramTokenizer struct {
	v *types.NGramTokenizer
}

// NewNGramTokenizer returns a builder for an n-gram tokenizer.
func NewNGramTokenizer() *_nGramTokenizer {

	return &_nGramTokenizer{v: types.NewNGramTokenizer()}

}

// CustomTokenChars sets custom characters treated as part of a token.
func (s *_nGramTokenizer) CustomTokenChars(customtokenchars string) *_nGramTokenizer {

	s.v.CustomTokenChars = &customtokenchars

	return s
}

// MaxGram sets the maximum length of the produced n-grams.
func (s *_nGramTokenizer) MaxGram(maxgram int) *_nGramTokenizer {

	s.v.MaxGram = &maxgram

	return s
}

// MinGram sets the minimum length of the produced n-grams.
func (s *_nGramTokenizer) MinGram(mingram int) *_nGramTokenizer {

	s.v.MinGram = &mingram

	return s
}

// TokenChars appends the character classes to keep in tokens.
func (s *_nGramTokenizer) TokenChars(tokenchars ...tokenchar.TokenChar) *_nGramTokenizer {

	for _, v := range tokenchars {

		s.v.TokenChars = append(s.v.TokenChars, v)

	}
	return s
}

// Version sets the version string for the tokenizer definition.
func (s *_nGramTokenizer) Version(versionstring string) *_nGramTokenizer {

	s.v.Version = &versionstring

	return s
}

// NGramTokenizerCaster returns the underlying NGramTokenizer value.
func (s *_nGramTokenizer) NGramTokenizerCaster() *types.NGramTokenizer {
	return s.v
}
diff --git a/typedapi/esdsl/nlpberttokenizationconfig.go b/typedapi/esdsl/nlpberttokenizationconfig.go
new file mode 100644
index 0000000000..b3765aa95d
--- /dev/null
+++ b/typedapi/esdsl/nlpberttokenizationconfig.go
@@ -0,0 +1,90 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V.
// licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate"
)

type _nlpBertTokenizationConfig struct {
	v *types.NlpBertTokenizationConfig
}

// Indicates MPNET tokenization and its options
func NewNlpBertTokenizationConfig() *_nlpBertTokenizationConfig {

	return &_nlpBertTokenizationConfig{v: types.NewNlpBertTokenizationConfig()}

}

// Should the tokenizer lower case the text
func (s *_nlpBertTokenizationConfig) DoLowerCase(dolowercase bool) *_nlpBertTokenizationConfig {

	s.v.DoLowerCase = &dolowercase

	return s
}

// Maximum input sequence length for the model
func (s *_nlpBertTokenizationConfig) MaxSequenceLength(maxsequencelength int) *_nlpBertTokenizationConfig {

	s.v.MaxSequenceLength = &maxsequencelength

	return s
}

// Tokenization spanning options. Special value of -1 indicates no spanning
// takes place
func (s *_nlpBertTokenizationConfig) Span(span int) *_nlpBertTokenizationConfig {

	s.v.Span = &span

	return s
}

// Should tokenization input be automatically truncated before sending to the
// model for inference
func (s *_nlpBertTokenizationConfig) Truncate(truncate tokenizationtruncate.TokenizationTruncate) *_nlpBertTokenizationConfig {

	s.v.Truncate = &truncate
	return s
}

// Is tokenization completed with special tokens
func (s *_nlpBertTokenizationConfig) WithSpecialTokens(withspecialtokens bool) *_nlpBertTokenizationConfig {

	s.v.WithSpecialTokens = &withspecialtokens

	return s
}

// TokenizationConfigContainerCaster wraps this config in a
// TokenizationConfigContainer under its `mpnet` variant.
func (s *_nlpBertTokenizationConfig) TokenizationConfigContainerCaster() *types.TokenizationConfigContainer {
	container := types.NewTokenizationConfigContainer()

	container.Mpnet = s.v

	return container
}

// NlpBertTokenizationConfigCaster returns the underlying
// NlpBertTokenizationConfig value.
func (s *_nlpBertTokenizationConfig) NlpBertTokenizationConfigCaster() *types.NlpBertTokenizationConfig {
	return s.v
}
diff --git a/typedapi/esdsl/nlprobertatokenizationconfig.go b/typedapi/esdsl/nlprobertatokenizationconfig.go
new file mode 100644
index 0000000000..6495c1f7c7
--- /dev/null
+++ b/typedapi/esdsl/nlprobertatokenizationconfig.go
@@ -0,0 +1,98 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
// See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate"
)

type _nlpRobertaTokenizationConfig struct {
	v *types.NlpRobertaTokenizationConfig
}

// Indicates RoBERTa tokenization and its options
func NewNlpRobertaTokenizationConfig() *_nlpRobertaTokenizationConfig {

	return &_nlpRobertaTokenizationConfig{v: types.NewNlpRobertaTokenizationConfig()}

}

// Should the tokenizer prefix input with a space character
func (s *_nlpRobertaTokenizationConfig) AddPrefixSpace(addprefixspace bool) *_nlpRobertaTokenizationConfig {

	s.v.AddPrefixSpace = &addprefixspace

	return s
}

// Should the tokenizer lower case the text
func (s *_nlpRobertaTokenizationConfig) DoLowerCase(dolowercase bool) *_nlpRobertaTokenizationConfig {

	s.v.DoLowerCase = &dolowercase

	return s
}

// Maximum input sequence length for the model
func (s *_nlpRobertaTokenizationConfig) MaxSequenceLength(maxsequencelength int) *_nlpRobertaTokenizationConfig {

	s.v.MaxSequenceLength = &maxsequencelength

	return s
}

// Tokenization spanning options. Special value of -1 indicates no spanning
// takes place
func (s *_nlpRobertaTokenizationConfig) Span(span int) *_nlpRobertaTokenizationConfig {

	s.v.Span = &span

	return s
}

// Should tokenization input be automatically truncated before sending to the
// model for inference
func (s *_nlpRobertaTokenizationConfig) Truncate(truncate tokenizationtruncate.TokenizationTruncate) *_nlpRobertaTokenizationConfig {

	s.v.Truncate = &truncate
	return s
}

// Is tokenization completed with special tokens
func (s *_nlpRobertaTokenizationConfig) WithSpecialTokens(withspecialtokens bool) *_nlpRobertaTokenizationConfig {

	s.v.WithSpecialTokens = &withspecialtokens

	return s
}

// TokenizationConfigContainerCaster wraps this config in a
// TokenizationConfigContainer under its `roberta` variant.
func (s *_nlpRobertaTokenizationConfig) TokenizationConfigContainerCaster() *types.TokenizationConfigContainer {
	container := types.NewTokenizationConfigContainer()

	container.Roberta = s.v

	return container
}

// NlpRobertaTokenizationConfigCaster returns the underlying
// NlpRobertaTokenizationConfig value.
func (s *_nlpRobertaTokenizationConfig) NlpRobertaTokenizationConfigCaster() *types.NlpRobertaTokenizationConfig {
	return s.v
}
diff --git a/typedapi/esdsl/nlptokenizationupdateoptions.go b/typedapi/esdsl/nlptokenizationupdateoptions.go
new file mode 100644
index 0000000000..978fe07a30
--- /dev/null
+++ b/typedapi/esdsl/nlptokenizationupdateoptions.go
@@ -0,0 +1,55 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
// See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b

package esdsl

import (
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate"
)

type _nlpTokenizationUpdateOptions struct {
	v *types.NlpTokenizationUpdateOptions
}

// NewNlpTokenizationUpdateOptions returns a builder for tokenization options
// applied at inference-update time.
func NewNlpTokenizationUpdateOptions() *_nlpTokenizationUpdateOptions {

	return &_nlpTokenizationUpdateOptions{v: types.NewNlpTokenizationUpdateOptions()}

}

// Span options to apply
func (s *_nlpTokenizationUpdateOptions) Span(span int) *_nlpTokenizationUpdateOptions {

	s.v.Span = &span

	return s
}

// Truncate options to apply
func (s *_nlpTokenizationUpdateOptions) Truncate(truncate tokenizationtruncate.TokenizationTruncate) *_nlpTokenizationUpdateOptions {

	s.v.Truncate = &truncate
	return s
}

// NlpTokenizationUpdateOptionsCaster returns the underlying
// NlpTokenizationUpdateOptions value.
func (s *_nlpTokenizationUpdateOptions) NlpTokenizationUpdateOptionsCaster() *types.NlpTokenizationUpdateOptions {
	return s.v
}
diff --git a/typedapi/esdsl/norianalyzer.go b/typedapi/esdsl/norianalyzer.go
new file mode 100644
index 0000000000..1dba7a5541
--- /dev/null
+++ b/typedapi/esdsl/norianalyzer.go
@@ -0,0 +1,70 @@
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode" +) + +type _noriAnalyzer struct { + v *types.NoriAnalyzer +} + +func NewNoriAnalyzer() *_noriAnalyzer { + + return &_noriAnalyzer{v: types.NewNoriAnalyzer()} + +} + +func (s *_noriAnalyzer) DecompoundMode(decompoundmode noridecompoundmode.NoriDecompoundMode) *_noriAnalyzer { + + s.v.DecompoundMode = &decompoundmode + return s +} + +func (s *_noriAnalyzer) Stoptags(stoptags ...string) *_noriAnalyzer { + + for _, v := range stoptags { + + s.v.Stoptags = append(s.v.Stoptags, v) + + } + return s +} + +func (s *_noriAnalyzer) UserDictionary(userdictionary string) *_noriAnalyzer { + + s.v.UserDictionary = &userdictionary + + return s +} + +func (s *_noriAnalyzer) Version(versionstring string) *_noriAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_noriAnalyzer) NoriAnalyzerCaster() *types.NoriAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/noripartofspeechtokenfilter.go b/typedapi/esdsl/noripartofspeechtokenfilter.go new file mode 100644 index 0000000000..d66b4bb066 --- /dev/null +++ b/typedapi/esdsl/noripartofspeechtokenfilter.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _noriPartOfSpeechTokenFilter struct { + v *types.NoriPartOfSpeechTokenFilter +} + +func NewNoriPartOfSpeechTokenFilter() *_noriPartOfSpeechTokenFilter { + + return &_noriPartOfSpeechTokenFilter{v: types.NewNoriPartOfSpeechTokenFilter()} + +} + +func (s *_noriPartOfSpeechTokenFilter) Stoptags(stoptags ...string) *_noriPartOfSpeechTokenFilter { + + for _, v := range stoptags { + + s.v.Stoptags = append(s.v.Stoptags, v) + + } + return s +} + +func (s *_noriPartOfSpeechTokenFilter) Version(versionstring string) *_noriPartOfSpeechTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_noriPartOfSpeechTokenFilter) NoriPartOfSpeechTokenFilterCaster() *types.NoriPartOfSpeechTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/noritokenizer.go b/typedapi/esdsl/noritokenizer.go new file mode 100644 index 0000000000..8206906fec --- /dev/null +++ b/typedapi/esdsl/noritokenizer.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode" +) + +type _noriTokenizer struct { + v *types.NoriTokenizer +} + +func NewNoriTokenizer() *_noriTokenizer { + + return &_noriTokenizer{v: types.NewNoriTokenizer()} + +} + +func (s *_noriTokenizer) DecompoundMode(decompoundmode noridecompoundmode.NoriDecompoundMode) *_noriTokenizer { + + s.v.DecompoundMode = &decompoundmode + return s +} + +func (s *_noriTokenizer) DiscardPunctuation(discardpunctuation bool) *_noriTokenizer { + + s.v.DiscardPunctuation = &discardpunctuation + + return s +} + +func (s *_noriTokenizer) UserDictionary(userdictionary string) *_noriTokenizer { + + s.v.UserDictionary = &userdictionary + + return s +} + +func (s *_noriTokenizer) UserDictionaryRules(userdictionaryrules ...string) *_noriTokenizer { + + for _, v := range userdictionaryrules { + + s.v.UserDictionaryRules = append(s.v.UserDictionaryRules, v) + + } + return s +} + +func (s *_noriTokenizer) Version(versionstring string) *_noriTokenizer { + 
+ s.v.Version = &versionstring + + return s +} + +func (s *_noriTokenizer) NoriTokenizerCaster() *types.NoriTokenizer { + return s.v +} diff --git a/typedapi/esdsl/normalizeaggregation.go b/typedapi/esdsl/normalizeaggregation.go new file mode 100644 index 0000000000..83e62eb793 --- /dev/null +++ b/typedapi/esdsl/normalizeaggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod" +) + +type _normalizeAggregation struct { + v *types.NormalizeAggregation +} + +// A parent pipeline aggregation which calculates the specific +// normalized/rescaled value for a specific bucket value. +func NewNormalizeAggregation() *_normalizeAggregation { + + return &_normalizeAggregation{v: types.NewNormalizeAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. 
+func (s *_normalizeAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_normalizeAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_normalizeAggregation) Format(format string) *_normalizeAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_normalizeAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_normalizeAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The specific method to apply. +func (s *_normalizeAggregation) Method(method normalizemethod.NormalizeMethod) *_normalizeAggregation { + + s.v.Method = &method + return s +} + +func (s *_normalizeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Normalize = s.v + + return container +} + +func (s *_normalizeAggregation) NormalizeAggregationCaster() *types.NormalizeAggregation { + return s.v +} diff --git a/typedapi/esdsl/normalizer.go b/typedapi/esdsl/normalizer.go new file mode 100644 index 0000000000..670b3368ab --- /dev/null +++ b/typedapi/esdsl/normalizer.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _normalizer struct { + v types.Normalizer +} + +func NewNormalizer() *_normalizer { + return &_normalizer{v: nil} +} + +func (u *_normalizer) LowercaseNormalizer(lowercasenormalizer types.LowercaseNormalizerVariant) *_normalizer { + + u.v = &lowercasenormalizer + + return u +} + +// Interface implementation for LowercaseNormalizer in Normalizer union +func (u *_lowercaseNormalizer) NormalizerCaster() *types.Normalizer { + t := types.Normalizer(u.v) + return &t +} + +func (u *_normalizer) CustomNormalizer(customnormalizer types.CustomNormalizerVariant) *_normalizer { + + u.v = &customnormalizer + + return u +} + +// Interface implementation for CustomNormalizer in Normalizer union +func (u *_customNormalizer) NormalizerCaster() *types.Normalizer { + t := types.Normalizer(u.v) + return &t +} + +func (u *_normalizer) NormalizerCaster() *types.Normalizer { + return &u.v +} diff --git a/typedapi/esdsl/norwegiananalyzer.go b/typedapi/esdsl/norwegiananalyzer.go new file mode 100644 index 0000000000..e546f35744 --- /dev/null +++ b/typedapi/esdsl/norwegiananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _norwegianAnalyzer struct { + v *types.NorwegianAnalyzer +} + +func NewNorwegianAnalyzer() *_norwegianAnalyzer { + + return &_norwegianAnalyzer{v: types.NewNorwegianAnalyzer()} + +} + +func (s *_norwegianAnalyzer) StemExclusion(stemexclusions ...string) *_norwegianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_norwegianAnalyzer) Stopwords(stopwords ...string) *_norwegianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_norwegianAnalyzer) StopwordsPath(stopwordspath string) *_norwegianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_norwegianAnalyzer) NorwegianAnalyzerCaster() *types.NorwegianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/numberrangequery.go b/typedapi/esdsl/numberrangequery.go new file mode 100644 index 0000000000..6b510fbefb --- /dev/null +++ b/typedapi/esdsl/numberrangequery.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" +) + +type _numberRangeQuery struct { + k string + v *types.NumberRangeQuery +} + +// Returns documents that contain terms within a provided range. +func NewNumberRangeQuery(key string) *_numberRangeQuery { + return &_numberRangeQuery{ + k: key, + v: types.NewNumberRangeQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_numberRangeQuery) Boost(boost float32) *_numberRangeQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_numberRangeQuery) From(from types.Float64) *_numberRangeQuery { + + s.v.From = &from + + return s +} + +// Greater than. 
+func (s *_numberRangeQuery) Gt(gt types.Float64) *_numberRangeQuery { + + s.v.Gt = > + + return s +} + +// Greater than or equal to. +func (s *_numberRangeQuery) Gte(gte types.Float64) *_numberRangeQuery { + + s.v.Gte = >e + + return s +} + +// Less than. +func (s *_numberRangeQuery) Lt(lt types.Float64) *_numberRangeQuery { + + s.v.Lt = < + + return s +} + +// Less than or equal to. +func (s *_numberRangeQuery) Lte(lte types.Float64) *_numberRangeQuery { + + s.v.Lte = <e + + return s +} + +func (s *_numberRangeQuery) QueryName_(queryname_ string) *_numberRangeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates how the range query matches values for `range` fields. +func (s *_numberRangeQuery) Relation(relation rangerelation.RangeRelation) *_numberRangeQuery { + + s.v.Relation = &relation + return s +} + +func (s *_numberRangeQuery) To(to types.Float64) *_numberRangeQuery { + + s.v.To = &to + + return s +} + +func (s *_numberRangeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_numberRangeQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_numberRangeQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_numberRangeQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleNumberRangeQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleNumberRangeQuery() *_numberRangeQuery { + return &_numberRangeQuery{ + k: "", + v: types.NewNumberRangeQuery(), + } +} + +func (s *_numberRangeQuery) NumberRangeQueryCaster() *types.NumberRangeQuery { + return s.v.NumberRangeQueryCaster() +} diff --git a/typedapi/esdsl/numericdecayfunction.go b/typedapi/esdsl/numericdecayfunction.go new file mode 100644 index 0000000000..e4ffd0b3e5 --- /dev/null +++ b/typedapi/esdsl/numericdecayfunction.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" +) + +type _numericDecayFunction struct { + v *types.NumericDecayFunction +} + +// Function that scores a document with a linear decay, depending on the +// distance of a numeric field value of the document from an origin. 
+func NewNumericDecayFunction() *_numericDecayFunction { + + return &_numericDecayFunction{v: types.NewNumericDecayFunction()} + +} + +func (s *_numericDecayFunction) DecayFunctionBasedoubledouble(decayfunctionbasedoubledouble map[string]types.DecayPlacementdoubledouble) *_numericDecayFunction { + + s.v.DecayFunctionBasedoubledouble = decayfunctionbasedoubledouble + return s +} + +func (s *_numericDecayFunction) AddDecayFunctionBasedoubledouble(key string, value types.DecayPlacementdoubledoubleVariant) *_numericDecayFunction { + + var tmp map[string]types.DecayPlacementdoubledouble + if s.v.DecayFunctionBasedoubledouble == nil { + s.v.DecayFunctionBasedoubledouble = make(map[string]types.DecayPlacementdoubledouble) + } else { + tmp = s.v.DecayFunctionBasedoubledouble + } + + tmp[key] = *value.DecayPlacementdoubledoubleCaster() + + s.v.DecayFunctionBasedoubledouble = tmp + return s +} + +// Determines how the distance is calculated when a field used for computing the +// decay contains multiple values. +func (s *_numericDecayFunction) MultiValueMode(multivaluemode multivaluemode.MultiValueMode) *_numericDecayFunction { + + s.v.MultiValueMode = &multivaluemode + return s +} + +func (s *_numericDecayFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.Linear = s.v + + return container +} + +func (s *_numericDecayFunction) NumericDecayFunctionCaster() *types.NumericDecayFunction { + return s.v +} diff --git a/typedapi/esdsl/numericfielddata.go b/typedapi/esdsl/numericfielddata.go new file mode 100644 index 0000000000..35aa74b6c9 --- /dev/null +++ b/typedapi/esdsl/numericfielddata.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat" +) + +type _numericFielddata struct { + v *types.NumericFielddata +} + +func NewNumericFielddata(format numericfielddataformat.NumericFielddataFormat) *_numericFielddata { + + tmp := &_numericFielddata{v: types.NewNumericFielddata()} + + tmp.Format(format) + + return tmp + +} + +func (s *_numericFielddata) Format(format numericfielddataformat.NumericFielddataFormat) *_numericFielddata { + + s.v.Format = format + return s +} + +func (s *_numericFielddata) NumericFielddataCaster() *types.NumericFielddata { + return s.v +} diff --git a/typedapi/esdsl/objectproperty.go b/typedapi/esdsl/objectproperty.go new file mode 100644 index 0000000000..80a11f1484 --- /dev/null +++ b/typedapi/esdsl/objectproperty.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _objectProperty struct { + v *types.ObjectProperty +} + +func NewObjectProperty() *_objectProperty { + + return &_objectProperty{v: types.NewObjectProperty()} + +} + +func (s *_objectProperty) CopyTo(fields ...string) *_objectProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_objectProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_objectProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_objectProperty) Enabled(enabled bool) *_objectProperty { + + s.v.Enabled = &enabled + + return s +} + +func (s *_objectProperty) Fields(fields map[string]types.Property) *_objectProperty { + + s.v.Fields = fields + return s +} + +func (s *_objectProperty) AddField(key string, value types.PropertyVariant) *_objectProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + 
tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_objectProperty) IgnoreAbove(ignoreabove int) *_objectProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. +func (s *_objectProperty) Meta(meta map[string]string) *_objectProperty { + + s.v.Meta = meta + return s +} + +func (s *_objectProperty) AddMeta(key string, value string) *_objectProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_objectProperty) Properties(properties map[string]types.Property) *_objectProperty { + + s.v.Properties = properties + return s +} + +func (s *_objectProperty) AddProperty(key string, value types.PropertyVariant) *_objectProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_objectProperty) Store(store bool) *_objectProperty { + + s.v.Store = &store + + return s +} + +func (s *_objectProperty) Subobjects(subobjects subobjects.Subobjects) *_objectProperty { + + s.v.Subobjects = &subobjects + return s +} + +func (s *_objectProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_objectProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_objectProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_objectProperty) ObjectPropertyCaster() *types.ObjectProperty { + return s.v +} diff --git a/typedapi/esdsl/onehotencodingpreprocessor.go b/typedapi/esdsl/onehotencodingpreprocessor.go new file mode 100644 index 0000000000..a9ed5cb4bf --- /dev/null +++ 
b/typedapi/esdsl/onehotencodingpreprocessor.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _oneHotEncodingPreprocessor struct { + v *types.OneHotEncodingPreprocessor +} + +func NewOneHotEncodingPreprocessor(field string) *_oneHotEncodingPreprocessor { + + tmp := &_oneHotEncodingPreprocessor{v: types.NewOneHotEncodingPreprocessor()} + + tmp.Field(field) + + return tmp + +} + +func (s *_oneHotEncodingPreprocessor) Field(field string) *_oneHotEncodingPreprocessor { + + s.v.Field = field + + return s +} + +func (s *_oneHotEncodingPreprocessor) HotMap(hotmap map[string]string) *_oneHotEncodingPreprocessor { + + s.v.HotMap = hotmap + return s +} + +func (s *_oneHotEncodingPreprocessor) AddHotMap(key string, value string) *_oneHotEncodingPreprocessor { + + var tmp map[string]string + if s.v.HotMap == nil { + s.v.HotMap = make(map[string]string) + } else { + tmp = s.v.HotMap + } + + tmp[key] = value + + s.v.HotMap = tmp + return s +} + +func (s 
*_oneHotEncodingPreprocessor) PreprocessorCaster() *types.Preprocessor { + container := types.NewPreprocessor() + + container.OneHotEncoding = s.v + + return container +} + +func (s *_oneHotEncodingPreprocessor) OneHotEncodingPreprocessorCaster() *types.OneHotEncodingPreprocessor { + return s.v +} diff --git a/typedapi/esdsl/openaiservicesettings.go b/typedapi/esdsl/openaiservicesettings.go new file mode 100644 index 0000000000..14db649da7 --- /dev/null +++ b/typedapi/esdsl/openaiservicesettings.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _openAIServiceSettings struct { + v *types.OpenAIServiceSettings +} + +func NewOpenAIServiceSettings(apikey string, modelid string) *_openAIServiceSettings { + + tmp := &_openAIServiceSettings{v: types.NewOpenAIServiceSettings()} + + tmp.ApiKey(apikey) + + tmp.ModelId(modelid) + + return tmp + +} + +// A valid API key of your OpenAI account. 
+// You can find your OpenAI API keys in your OpenAI account under the API keys +// section. +// +// IMPORTANT: You need to provide the API key only once, during the inference +// model creation. +// The get inference endpoint API does not retrieve your API key. +// After creating the inference model, you cannot change the associated API key. +// If you want to use a different API key, delete the inference model and +// recreate it with the same name and the updated API key. +func (s *_openAIServiceSettings) ApiKey(apikey string) *_openAIServiceSettings { + + s.v.ApiKey = apikey + + return s +} + +// The number of dimensions the resulting output embeddings should have. +// It is supported only in `text-embedding-3` and later models. +// If it is not set, the OpenAI defined default for the model is used. +func (s *_openAIServiceSettings) Dimensions(dimensions int) *_openAIServiceSettings { + + s.v.Dimensions = &dimensions + + return s +} + +// The name of the model to use for the inference task. +// Refer to the OpenAI documentation for the list of available text embedding +// models. +func (s *_openAIServiceSettings) ModelId(modelid string) *_openAIServiceSettings { + + s.v.ModelId = modelid + + return s +} + +// The unique identifier for your organization. +// You can find the Organization ID in your OpenAI account under *Settings > +// Organizations*. +func (s *_openAIServiceSettings) OrganizationId(organizationid string) *_openAIServiceSettings { + + s.v.OrganizationId = &organizationid + + return s +} + +// This setting helps to minimize the number of rate limit errors returned from +// OpenAI. +// The `openai` service sets a default number of requests allowed per minute +// depending on the task type. +// For `text_embedding`, it is set to `3000`. +// For `completion`, it is set to `500`. 
+func (s *_openAIServiceSettings) RateLimit(ratelimit types.RateLimitSettingVariant) *_openAIServiceSettings { + + s.v.RateLimit = ratelimit.RateLimitSettingCaster() + + return s +} + +// The URL endpoint to use for the requests. +// It can be changed for testing purposes. +func (s *_openAIServiceSettings) Url(url string) *_openAIServiceSettings { + + s.v.Url = &url + + return s +} + +func (s *_openAIServiceSettings) OpenAIServiceSettingsCaster() *types.OpenAIServiceSettings { + return s.v +} diff --git a/typedapi/esdsl/openaitasksettings.go b/typedapi/esdsl/openaitasksettings.go new file mode 100644 index 0000000000..c30c85b2fe --- /dev/null +++ b/typedapi/esdsl/openaitasksettings.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _openAITaskSettings struct { + v *types.OpenAITaskSettings +} + +func NewOpenAITaskSettings() *_openAITaskSettings { + + return &_openAITaskSettings{v: types.NewOpenAITaskSettings()} + +} + +// For a `completion` or `text_embedding` task, specify the user issuing the +// request. +// This information can be used for abuse detection. +func (s *_openAITaskSettings) User(user string) *_openAITaskSettings { + + s.v.User = &user + + return s +} + +func (s *_openAITaskSettings) OpenAITaskSettingsCaster() *types.OpenAITaskSettings { + return s.v +} diff --git a/typedapi/esdsl/operationcontainer.go b/typedapi/esdsl/operationcontainer.go new file mode 100644 index 0000000000..c7d1f15ce1 --- /dev/null +++ b/typedapi/esdsl/operationcontainer.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _operationContainer struct { + v *types.OperationContainer +} + +func NewOperationContainer() *_operationContainer { + return &_operationContainer{v: types.NewOperationContainer()} +} + +// AdditionalOperationContainerProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_operationContainer) AdditionalOperationContainerProperty(key string, value json.RawMessage) *_operationContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalOperationContainerProperty = tmp + return s +} + +// Index the specified document if it does not already exist. +// The following line must contain the source data to be indexed. +func (s *_operationContainer) Create(create types.CreateOperationVariant) *_operationContainer { + + s.v.Create = create.CreateOperationCaster() + + return s +} + +// Remove the specified document from the index. +func (s *_operationContainer) Delete(delete types.DeleteOperationVariant) *_operationContainer { + + s.v.Delete = delete.DeleteOperationCaster() + + return s +} + +// Index the specified document. +// If the document exists, it replaces the document and increments the version. +// The following line must contain the source data to be indexed. +func (s *_operationContainer) Index(index types.IndexOperationVariant) *_operationContainer { + + s.v.Index = index.IndexOperationCaster() + + return s +} + +// Perform a partial document update. +// The following line must contain the partial document and update options.
+func (s *_operationContainer) Update(update types.UpdateOperationVariant) *_operationContainer { + + s.v.Update = update.UpdateOperationCaster() + + return s +} + +func (s *_operationContainer) OperationContainerCaster() *types.OperationContainer { + return s.v +} diff --git a/typedapi/esdsl/page.go b/typedapi/esdsl/page.go new file mode 100644 index 0000000000..219e71959f --- /dev/null +++ b/typedapi/esdsl/page.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _page struct { + v *types.Page +} + +func NewPage() *_page { + + return &_page{v: types.NewPage()} + +} + +// Skips the specified number of items. +func (s *_page) From(from int) *_page { + + s.v.From = &from + + return s +} + +// Specifies the maximum number of items to obtain. 
+func (s *_page) Size(size int) *_page { + + s.v.Size = &size + + return s +} + +func (s *_page) PageCaster() *types.Page { + return s.v +} diff --git a/typedapi/esdsl/pagerdutyaction.go b/typedapi/esdsl/pagerdutyaction.go new file mode 100644 index 0000000000..d020b4fa24 --- /dev/null +++ b/typedapi/esdsl/pagerdutyaction.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype" +) + +type _pagerDutyAction struct { + v *types.PagerDutyAction +} + +func NewPagerDutyAction(attachpayload bool, description string, incidentkey string) *_pagerDutyAction { + + tmp := &_pagerDutyAction{v: types.NewPagerDutyAction()} + + tmp.AttachPayload(attachpayload) + + tmp.Description(description) + + tmp.IncidentKey(incidentkey) + + return tmp + +} + +func (s *_pagerDutyAction) Account(account string) *_pagerDutyAction { + + s.v.Account = &account + + return s +} + +func (s *_pagerDutyAction) AttachPayload(attachpayload bool) *_pagerDutyAction { + + s.v.AttachPayload = attachpayload + + return s +} + +func (s *_pagerDutyAction) Client(client string) *_pagerDutyAction { + + s.v.Client = &client + + return s +} + +func (s *_pagerDutyAction) ClientUrl(clienturl string) *_pagerDutyAction { + + s.v.ClientUrl = &clienturl + + return s +} + +func (s *_pagerDutyAction) Contexts(contexts ...types.PagerDutyContextVariant) *_pagerDutyAction { + + for _, v := range contexts { + + s.v.Contexts = append(s.v.Contexts, *v.PagerDutyContextCaster()) + + } + return s +} + +func (s *_pagerDutyAction) Description(description string) *_pagerDutyAction { + + s.v.Description = description + + return s +} + +func (s *_pagerDutyAction) EventType(eventtype pagerdutyeventtype.PagerDutyEventType) *_pagerDutyAction { + + s.v.EventType = &eventtype + return s +} + +func (s *_pagerDutyAction) IncidentKey(incidentkey string) *_pagerDutyAction { + + s.v.IncidentKey = incidentkey + + return s +} + +func (s *_pagerDutyAction) Proxy(proxy types.PagerDutyEventProxyVariant) *_pagerDutyAction { + + s.v.Proxy = proxy.PagerDutyEventProxyCaster() + + return s +} + +func (s *_pagerDutyAction) PagerDutyActionCaster() 
*types.PagerDutyAction { + return s.v +} diff --git a/typedapi/esdsl/pagerdutycontext.go b/typedapi/esdsl/pagerdutycontext.go new file mode 100644 index 0000000000..5d906bdbcd --- /dev/null +++ b/typedapi/esdsl/pagerdutycontext.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype" +) + +type _pagerDutyContext struct { + v *types.PagerDutyContext +} + +func NewPagerDutyContext(type_ pagerdutycontexttype.PagerDutyContextType) *_pagerDutyContext { + + tmp := &_pagerDutyContext{v: types.NewPagerDutyContext()} + + tmp.Type(type_) + + return tmp + +} + +func (s *_pagerDutyContext) Href(href string) *_pagerDutyContext { + + s.v.Href = &href + + return s +} + +func (s *_pagerDutyContext) Src(src string) *_pagerDutyContext { + + s.v.Src = &src + + return s +} + +func (s *_pagerDutyContext) Type(type_ pagerdutycontexttype.PagerDutyContextType) *_pagerDutyContext { + + s.v.Type = type_ + return s +} + +func (s *_pagerDutyContext) PagerDutyContextCaster() *types.PagerDutyContext { + return s.v +} diff --git a/typedapi/esdsl/pagerdutyeventproxy.go b/typedapi/esdsl/pagerdutyeventproxy.go new file mode 100644 index 0000000000..54953c74b6 --- /dev/null +++ b/typedapi/esdsl/pagerdutyeventproxy.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pagerDutyEventProxy struct { + v *types.PagerDutyEventProxy +} + +func NewPagerDutyEventProxy() *_pagerDutyEventProxy { + + return &_pagerDutyEventProxy{v: types.NewPagerDutyEventProxy()} + +} + +func (s *_pagerDutyEventProxy) Host(host string) *_pagerDutyEventProxy { + + s.v.Host = &host + + return s +} + +func (s *_pagerDutyEventProxy) Port(port int) *_pagerDutyEventProxy { + + s.v.Port = &port + + return s +} + +func (s *_pagerDutyEventProxy) PagerDutyEventProxyCaster() *types.PagerDutyEventProxy { + return s.v +} diff --git a/typedapi/esdsl/painlesscontextsetup.go b/typedapi/esdsl/painlesscontextsetup.go new file mode 100644 index 0000000000..a1e0ee6e91 --- /dev/null +++ b/typedapi/esdsl/painlesscontextsetup.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _painlessContextSetup struct { + v *types.PainlessContextSetup +} + +func NewPainlessContextSetup(document json.RawMessage) *_painlessContextSetup { + + tmp := &_painlessContextSetup{v: types.NewPainlessContextSetup()} + + tmp.Document(document) + + return tmp + +} + +// Document that's temporarily indexed in-memory and accessible from the script. +func (s *_painlessContextSetup) Document(document json.RawMessage) *_painlessContextSetup { + + s.v.Document = document + + return s +} + +// Index containing a mapping that's compatible with the indexed document. +// You may specify a remote index by prefixing the index with the remote cluster +// alias. +// For example, `remote1:my_index` indicates that you want to run the painless +// script against the "my_index" index on the "remote1" cluster. +// This request will be forwarded to the "remote1" cluster if you have +// configured a connection to that remote cluster. +// +// NOTE: Wildcards are not accepted in the index expression for this endpoint. +// The expression `*:myindex` will return the error "No such remote cluster" and +// the expression `logs*` or `remote1:logs*` will return the error "index not +// found". +func (s *_painlessContextSetup) Index(indexname string) *_painlessContextSetup { + + s.v.Index = indexname + + return s +} + +// Use this parameter to specify a query for computing a score. 
+func (s *_painlessContextSetup) Query(query types.QueryVariant) *_painlessContextSetup { + + s.v.Query = query.QueryCaster() + + return s +} + +func (s *_painlessContextSetup) PainlessContextSetupCaster() *types.PainlessContextSetup { + return s.v +} diff --git a/typedapi/esdsl/parentaggregation.go b/typedapi/esdsl/parentaggregation.go new file mode 100644 index 0000000000..a27023dfad --- /dev/null +++ b/typedapi/esdsl/parentaggregation.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _parentAggregation struct { + v *types.ParentAggregation +} + +// A special single bucket aggregation that selects parent documents that have +// the specified type, as defined in a `join` field. +func NewParentAggregation() *_parentAggregation { + + return &_parentAggregation{v: types.NewParentAggregation()} + +} + +// The child type that should be selected. 
+func (s *_parentAggregation) Type(relationname string) *_parentAggregation { + + s.v.Type = &relationname + + return s +} + +func (s *_parentAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Parent = s.v + + return container +} + +func (s *_parentAggregation) ParentAggregationCaster() *types.ParentAggregation { + return s.v +} diff --git a/typedapi/esdsl/parentidquery.go b/typedapi/esdsl/parentidquery.go new file mode 100644 index 0000000000..6e165c480a --- /dev/null +++ b/typedapi/esdsl/parentidquery.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _parentIdQuery struct { + v *types.ParentIdQuery +} + +// Returns child documents joined to a specific parent document. +func NewParentIdQuery() *_parentIdQuery { + + return &_parentIdQuery{v: types.NewParentIdQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. 
+// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_parentIdQuery) Boost(boost float32) *_parentIdQuery { + + s.v.Boost = &boost + + return s +} + +// ID of the parent document. +func (s *_parentIdQuery) Id(id string) *_parentIdQuery { + + s.v.Id = &id + + return s +} + +// Indicates whether to ignore an unmapped `type` and not return any documents +// instead of an error. +func (s *_parentIdQuery) IgnoreUnmapped(ignoreunmapped bool) *_parentIdQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_parentIdQuery) QueryName_(queryname_ string) *_parentIdQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Name of the child relationship mapped for the `join` field. +func (s *_parentIdQuery) Type(relationname string) *_parentIdQuery { + + s.v.Type = &relationname + + return s +} + +func (s *_parentIdQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.ParentId = s.v + + return container +} + +func (s *_parentIdQuery) ParentIdQueryCaster() *types.ParentIdQuery { + return s.v +} diff --git a/typedapi/esdsl/passthroughinferenceoptions.go b/typedapi/esdsl/passthroughinferenceoptions.go new file mode 100644 index 0000000000..9b8dbb542e --- /dev/null +++ b/typedapi/esdsl/passthroughinferenceoptions.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _passThroughInferenceOptions struct { + v *types.PassThroughInferenceOptions +} + +// Pass through configuration for inference. +func NewPassThroughInferenceOptions() *_passThroughInferenceOptions { + + return &_passThroughInferenceOptions{v: types.NewPassThroughInferenceOptions()} + +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_passThroughInferenceOptions) ResultsField(resultsfield string) *_passThroughInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options +func (s *_passThroughInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_passThroughInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_passThroughInferenceOptions) Vocabulary(vocabulary types.VocabularyVariant) *_passThroughInferenceOptions { + + s.v.Vocabulary = vocabulary.VocabularyCaster() + + return s +} + +func (s *_passThroughInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.PassThrough = s.v + + return container +} + +func (s *_passThroughInferenceOptions) PassThroughInferenceOptionsCaster() *types.PassThroughInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/passthroughinferenceupdateoptions.go b/typedapi/esdsl/passthroughinferenceupdateoptions.go new file mode 100644 index 0000000000..153138a2ac --- /dev/null +++ b/typedapi/esdsl/passthroughinferenceupdateoptions.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _passThroughInferenceUpdateOptions struct { + v *types.PassThroughInferenceUpdateOptions +} + +// Pass through configuration for inference. +func NewPassThroughInferenceUpdateOptions() *_passThroughInferenceUpdateOptions { + + return &_passThroughInferenceUpdateOptions{v: types.NewPassThroughInferenceUpdateOptions()} + +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. +func (s *_passThroughInferenceUpdateOptions) ResultsField(resultsfield string) *_passThroughInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_passThroughInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_passThroughInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_passThroughInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.PassThrough = s.v + + return container +} + +func (s *_passThroughInferenceUpdateOptions) PassThroughInferenceUpdateOptionsCaster() *types.PassThroughInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/passthroughobjectproperty.go b/typedapi/esdsl/passthroughobjectproperty.go new file mode 100644 index 0000000000..795d12eced --- /dev/null +++ b/typedapi/esdsl/passthroughobjectproperty.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _passthroughObjectProperty struct { + v *types.PassthroughObjectProperty +} + +func NewPassthroughObjectProperty() *_passthroughObjectProperty { + + return &_passthroughObjectProperty{v: types.NewPassthroughObjectProperty()} + +} + +func (s *_passthroughObjectProperty) CopyTo(fields ...string) *_passthroughObjectProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_passthroughObjectProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_passthroughObjectProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_passthroughObjectProperty) Enabled(enabled bool) *_passthroughObjectProperty { + + s.v.Enabled = &enabled + + return s +} + +func (s *_passthroughObjectProperty) Fields(fields map[string]types.Property) *_passthroughObjectProperty { + + s.v.Fields = 
fields + return s +} + +func (s *_passthroughObjectProperty) AddField(key string, value types.PropertyVariant) *_passthroughObjectProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_passthroughObjectProperty) IgnoreAbove(ignoreabove int) *_passthroughObjectProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. +func (s *_passthroughObjectProperty) Meta(meta map[string]string) *_passthroughObjectProperty { + + s.v.Meta = meta + return s +} + +func (s *_passthroughObjectProperty) AddMeta(key string, value string) *_passthroughObjectProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_passthroughObjectProperty) Priority(priority int) *_passthroughObjectProperty { + + s.v.Priority = &priority + + return s +} + +func (s *_passthroughObjectProperty) Properties(properties map[string]types.Property) *_passthroughObjectProperty { + + s.v.Properties = properties + return s +} + +func (s *_passthroughObjectProperty) AddProperty(key string, value types.PropertyVariant) *_passthroughObjectProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_passthroughObjectProperty) Store(store bool) *_passthroughObjectProperty { + + s.v.Store = &store + + return s +} + +func (s *_passthroughObjectProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_passthroughObjectProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s 
*_passthroughObjectProperty) TimeSeriesDimension(timeseriesdimension bool) *_passthroughObjectProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +func (s *_passthroughObjectProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_passthroughObjectProperty) PassthroughObjectPropertyCaster() *types.PassthroughObjectProperty { + return s.v +} diff --git a/typedapi/esdsl/pathhierarchytokenizer.go b/typedapi/esdsl/pathhierarchytokenizer.go new file mode 100644 index 0000000000..1b6aabab1f --- /dev/null +++ b/typedapi/esdsl/pathhierarchytokenizer.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pathHierarchyTokenizer struct { + v *types.PathHierarchyTokenizer +} + +func NewPathHierarchyTokenizer() *_pathHierarchyTokenizer { + + return &_pathHierarchyTokenizer{v: types.NewPathHierarchyTokenizer()} + +} + +func (s *_pathHierarchyTokenizer) BufferSize(stringifiedinteger types.StringifiedintegerVariant) *_pathHierarchyTokenizer { + + s.v.BufferSize = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_pathHierarchyTokenizer) Delimiter(delimiter string) *_pathHierarchyTokenizer { + + s.v.Delimiter = &delimiter + + return s +} + +func (s *_pathHierarchyTokenizer) Replacement(replacement string) *_pathHierarchyTokenizer { + + s.v.Replacement = &replacement + + return s +} + +func (s *_pathHierarchyTokenizer) Reverse(stringifiedboolean types.StringifiedbooleanVariant) *_pathHierarchyTokenizer { + + s.v.Reverse = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_pathHierarchyTokenizer) Skip(stringifiedinteger types.StringifiedintegerVariant) *_pathHierarchyTokenizer { + + s.v.Skip = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_pathHierarchyTokenizer) Version(versionstring string) *_pathHierarchyTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_pathHierarchyTokenizer) PathHierarchyTokenizerCaster() *types.PathHierarchyTokenizer { + return s.v +} diff --git a/typedapi/esdsl/patternanalyzer.go b/typedapi/esdsl/patternanalyzer.go new file mode 100644 index 0000000000..1d8678d364 --- /dev/null +++ b/typedapi/esdsl/patternanalyzer.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _patternAnalyzer struct { + v *types.PatternAnalyzer +} + +func NewPatternAnalyzer() *_patternAnalyzer { + + return &_patternAnalyzer{v: types.NewPatternAnalyzer()} + +} + +// Java regular expression flags. Flags should be pipe-separated, eg +// "CASE_INSENSITIVE|COMMENTS". +func (s *_patternAnalyzer) Flags(flags string) *_patternAnalyzer { + + s.v.Flags = &flags + + return s +} + +// Should terms be lowercased or not. +// Defaults to `true`. +func (s *_patternAnalyzer) Lowercase(lowercase bool) *_patternAnalyzer { + + s.v.Lowercase = &lowercase + + return s +} + +// A Java regular expression. +// Defaults to `\W+`. +func (s *_patternAnalyzer) Pattern(pattern string) *_patternAnalyzer { + + s.v.Pattern = &pattern + + return s +} + +// A pre-defined stop words list like `_english_` or an array containing a list +// of stop words. +// Defaults to `_none_`. +func (s *_patternAnalyzer) Stopwords(stopwords ...string) *_patternAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +// The path to a file containing stop words. 
+func (s *_patternAnalyzer) StopwordsPath(stopwordspath string) *_patternAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_patternAnalyzer) Version(versionstring string) *_patternAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_patternAnalyzer) PatternAnalyzerCaster() *types.PatternAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/patterncapturetokenfilter.go b/typedapi/esdsl/patterncapturetokenfilter.go new file mode 100644 index 0000000000..fc9df485c6 --- /dev/null +++ b/typedapi/esdsl/patterncapturetokenfilter.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _patternCaptureTokenFilter struct { + v *types.PatternCaptureTokenFilter +} + +func NewPatternCaptureTokenFilter() *_patternCaptureTokenFilter { + + return &_patternCaptureTokenFilter{v: types.NewPatternCaptureTokenFilter()} + +} + +func (s *_patternCaptureTokenFilter) Patterns(patterns ...string) *_patternCaptureTokenFilter { + + for _, v := range patterns { + + s.v.Patterns = append(s.v.Patterns, v) + + } + return s +} + +func (s *_patternCaptureTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_patternCaptureTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_patternCaptureTokenFilter) Version(versionstring string) *_patternCaptureTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_patternCaptureTokenFilter) PatternCaptureTokenFilterCaster() *types.PatternCaptureTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/patternreplacecharfilter.go b/typedapi/esdsl/patternreplacecharfilter.go new file mode 100644 index 0000000000..a8ac159a37 --- /dev/null +++ b/typedapi/esdsl/patternreplacecharfilter.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _patternReplaceCharFilter struct { + v *types.PatternReplaceCharFilter +} + +func NewPatternReplaceCharFilter(pattern string) *_patternReplaceCharFilter { + + tmp := &_patternReplaceCharFilter{v: types.NewPatternReplaceCharFilter()} + + tmp.Pattern(pattern) + + return tmp + +} + +func (s *_patternReplaceCharFilter) Flags(flags string) *_patternReplaceCharFilter { + + s.v.Flags = &flags + + return s +} + +func (s *_patternReplaceCharFilter) Pattern(pattern string) *_patternReplaceCharFilter { + + s.v.Pattern = pattern + + return s +} + +func (s *_patternReplaceCharFilter) Replacement(replacement string) *_patternReplaceCharFilter { + + s.v.Replacement = &replacement + + return s +} + +func (s *_patternReplaceCharFilter) Version(versionstring string) *_patternReplaceCharFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_patternReplaceCharFilter) PatternReplaceCharFilterCaster() *types.PatternReplaceCharFilter { + return s.v +} diff --git a/typedapi/esdsl/patternreplacetokenfilter.go b/typedapi/esdsl/patternreplacetokenfilter.go new file mode 100644 index 0000000000..3b7e56afe3 --- /dev/null +++ b/typedapi/esdsl/patternreplacetokenfilter.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _patternReplaceTokenFilter struct { + v *types.PatternReplaceTokenFilter +} + +func NewPatternReplaceTokenFilter(pattern string) *_patternReplaceTokenFilter { + + tmp := &_patternReplaceTokenFilter{v: types.NewPatternReplaceTokenFilter()} + + tmp.Pattern(pattern) + + return tmp + +} + +func (s *_patternReplaceTokenFilter) All(all bool) *_patternReplaceTokenFilter { + + s.v.All = &all + + return s +} + +func (s *_patternReplaceTokenFilter) Flags(flags string) *_patternReplaceTokenFilter { + + s.v.Flags = &flags + + return s +} + +func (s *_patternReplaceTokenFilter) Pattern(pattern string) *_patternReplaceTokenFilter { + + s.v.Pattern = pattern + + return s +} + +func (s *_patternReplaceTokenFilter) Replacement(replacement string) *_patternReplaceTokenFilter { + + s.v.Replacement = &replacement + + return s +} + +func (s *_patternReplaceTokenFilter) Version(versionstring string) *_patternReplaceTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_patternReplaceTokenFilter) 
PatternReplaceTokenFilterCaster() *types.PatternReplaceTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/patterntokenizer.go b/typedapi/esdsl/patterntokenizer.go new file mode 100644 index 0000000000..50265e1398 --- /dev/null +++ b/typedapi/esdsl/patterntokenizer.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _patternTokenizer struct { + v *types.PatternTokenizer +} + +func NewPatternTokenizer() *_patternTokenizer { + + return &_patternTokenizer{v: types.NewPatternTokenizer()} + +} + +func (s *_patternTokenizer) Flags(flags string) *_patternTokenizer { + + s.v.Flags = &flags + + return s +} + +func (s *_patternTokenizer) Group(group int) *_patternTokenizer { + + s.v.Group = &group + + return s +} + +func (s *_patternTokenizer) Pattern(pattern string) *_patternTokenizer { + + s.v.Pattern = &pattern + + return s +} + +func (s *_patternTokenizer) Version(versionstring string) *_patternTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_patternTokenizer) PatternTokenizerCaster() *types.PatternTokenizer { + return s.v +} diff --git a/typedapi/esdsl/percentage.go b/typedapi/esdsl/percentage.go new file mode 100644 index 0000000000..332afdd0ef --- /dev/null +++ b/typedapi/esdsl/percentage.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _percentage struct { + v types.Percentage +} + +func NewPercentage() *_percentage { + return &_percentage{v: nil} +} + +func (u *_percentage) String(string string) *_percentage { + + u.v = &string + + return u +} + +func (u *_percentage) Float32(float32 float32) *_percentage { + + u.v = &float32 + + return u +} + +func (u *_percentage) PercentageCaster() *types.Percentage { + return &u.v +} diff --git a/typedapi/esdsl/percentagescoreheuristic.go b/typedapi/esdsl/percentagescoreheuristic.go new file mode 100644 index 0000000000..55c13525df --- /dev/null +++ b/typedapi/esdsl/percentagescoreheuristic.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _percentageScoreHeuristic struct { + v *types.PercentageScoreHeuristic +} + +func NewPercentageScoreHeuristic() *_percentageScoreHeuristic { + + return &_percentageScoreHeuristic{v: types.NewPercentageScoreHeuristic()} + +} + +func (s *_percentageScoreHeuristic) PercentageScoreHeuristicCaster() *types.PercentageScoreHeuristic { + return s.v +} diff --git a/typedapi/esdsl/percentileranksaggregation.go b/typedapi/esdsl/percentileranksaggregation.go new file mode 100644 index 0000000000..e48148598a --- /dev/null +++ b/typedapi/esdsl/percentileranksaggregation.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _percentileRanksAggregation struct { + v *types.PercentileRanksAggregation +} + +// A multi-value metrics aggregation that calculates one or more percentile +// ranks over numeric values extracted from the aggregated documents. +func NewPercentileRanksAggregation() *_percentileRanksAggregation { + + return &_percentileRanksAggregation{v: types.NewPercentileRanksAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_percentileRanksAggregation) Field(field string) *_percentileRanksAggregation { + + s.v.Field = &field + + return s +} + +func (s *_percentileRanksAggregation) Format(format string) *_percentileRanksAggregation { + + s.v.Format = &format + + return s +} + +// Uses the alternative High Dynamic Range Histogram algorithm to calculate +// percentile ranks. +func (s *_percentileRanksAggregation) Hdr(hdr types.HdrMethodVariant) *_percentileRanksAggregation { + + s.v.Hdr = hdr.HdrMethodCaster() + + return s +} + +// By default, the aggregation associates a unique string key with each bucket +// and returns the ranges as a hash rather than an array. +// Set to `false` to disable this behavior. +func (s *_percentileRanksAggregation) Keyed(keyed bool) *_percentileRanksAggregation { + + s.v.Keyed = &keyed + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_percentileRanksAggregation) Missing(missing types.MissingVariant) *_percentileRanksAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_percentileRanksAggregation) Script(script types.ScriptVariant) *_percentileRanksAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Sets parameters for the default TDigest algorithm used to calculate +// percentile ranks. +func (s *_percentileRanksAggregation) Tdigest(tdigest types.TDigestVariant) *_percentileRanksAggregation { + + s.v.Tdigest = tdigest.TDigestCaster() + + return s +} + +// An array of values for which to calculate the percentile ranks. +func (s *_percentileRanksAggregation) Values(values []types.Float64) *_percentileRanksAggregation { + + s.v.Values = &values + + return s +} + +func (s *_percentileRanksAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.PercentileRanks = s.v + + return container +} + +func (s *_percentileRanksAggregation) PercentileRanksAggregationCaster() *types.PercentileRanksAggregation { + return s.v +} diff --git a/typedapi/esdsl/percentilesaggregation.go b/typedapi/esdsl/percentilesaggregation.go new file mode 100644 index 0000000000..368a11ccc8 --- /dev/null +++ b/typedapi/esdsl/percentilesaggregation.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _percentilesAggregation struct { + v *types.PercentilesAggregation +} + +// A multi-value metrics aggregation that calculates one or more percentiles +// over numeric values extracted from the aggregated documents. +func NewPercentilesAggregation() *_percentilesAggregation { + + return &_percentilesAggregation{v: types.NewPercentilesAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_percentilesAggregation) Field(field string) *_percentilesAggregation { + + s.v.Field = &field + + return s +} + +func (s *_percentilesAggregation) Format(format string) *_percentilesAggregation { + + s.v.Format = &format + + return s +} + +// Uses the alternative High Dynamic Range Histogram algorithm to calculate +// percentiles. +func (s *_percentilesAggregation) Hdr(hdr types.HdrMethodVariant) *_percentilesAggregation { + + s.v.Hdr = hdr.HdrMethodCaster() + + return s +} + +// By default, the aggregation associates a unique string key with each bucket +// and returns the ranges as a hash rather than an array. +// Set to `false` to disable this behavior. +func (s *_percentilesAggregation) Keyed(keyed bool) *_percentilesAggregation { + + s.v.Keyed = &keyed + + return s +} + +// The value to apply to documents that do not have a value. 
+// By default, documents without a value are ignored. +func (s *_percentilesAggregation) Missing(missing types.MissingVariant) *_percentilesAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// The percentiles to calculate. +func (s *_percentilesAggregation) Percents(percents ...types.Float64) *_percentilesAggregation { + + for _, v := range percents { + + s.v.Percents = append(s.v.Percents, v) + + } + return s +} + +func (s *_percentilesAggregation) Script(script types.ScriptVariant) *_percentilesAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Sets parameters for the default TDigest algorithm used to calculate +// percentiles. +func (s *_percentilesAggregation) Tdigest(tdigest types.TDigestVariant) *_percentilesAggregation { + + s.v.Tdigest = tdigest.TDigestCaster() + + return s +} + +func (s *_percentilesAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Percentiles = s.v + + return container +} + +func (s *_percentilesAggregation) PercentilesAggregationCaster() *types.PercentilesAggregation { + return s.v +} diff --git a/typedapi/esdsl/percentilesbucketaggregation.go b/typedapi/esdsl/percentilesbucketaggregation.go new file mode 100644 index 0000000000..a4fbfcbd01 --- /dev/null +++ b/typedapi/esdsl/percentilesbucketaggregation.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _percentilesBucketAggregation struct { + v *types.PercentilesBucketAggregation +} + +// A sibling pipeline aggregation which calculates percentiles across all bucket +// of a specified metric in a sibling aggregation. +func NewPercentilesBucketAggregation() *_percentilesBucketAggregation { + + return &_percentilesBucketAggregation{v: types.NewPercentilesBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_percentilesBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_percentilesBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_percentilesBucketAggregation) Format(format string) *_percentilesBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_percentilesBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_percentilesBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The list of percentiles to calculate. 
+func (s *_percentilesBucketAggregation) Percents(percents ...types.Float64) *_percentilesBucketAggregation { + + for _, v := range percents { + + s.v.Percents = append(s.v.Percents, v) + + } + return s +} + +func (s *_percentilesBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.PercentilesBucket = s.v + + return container +} + +func (s *_percentilesBucketAggregation) PercentilesBucketAggregationCaster() *types.PercentilesBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/percolatequery.go b/typedapi/esdsl/percolatequery.go new file mode 100644 index 0000000000..f0f4957301 --- /dev/null +++ b/typedapi/esdsl/percolatequery.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _percolateQuery struct { + v *types.PercolateQuery +} + +// Matches queries stored in an index. 
+func NewPercolateQuery() *_percolateQuery { + + return &_percolateQuery{v: types.NewPercolateQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_percolateQuery) Boost(boost float32) *_percolateQuery { + + s.v.Boost = &boost + + return s +} + +// The source of the document being percolated. +func (s *_percolateQuery) Document(document json.RawMessage) *_percolateQuery { + + s.v.Document = document + + return s +} + +// An array of sources of the documents being percolated. +func (s *_percolateQuery) Documents(documents ...json.RawMessage) *_percolateQuery { + + for _, v := range documents { + + s.v.Documents = append(s.v.Documents, v) + + } + return s +} + +// Field that holds the indexed queries. The field must use the `percolator` +// mapping type. +func (s *_percolateQuery) Field(field string) *_percolateQuery { + + s.v.Field = field + + return s +} + +// The ID of a stored document to percolate. +func (s *_percolateQuery) Id(id string) *_percolateQuery { + + s.v.Id = &id + + return s +} + +// The index of a stored document to percolate. +func (s *_percolateQuery) Index(indexname string) *_percolateQuery { + + s.v.Index = &indexname + + return s +} + +// The suffix used for the `_percolator_document_slot` field when multiple +// `percolate` queries are specified. +func (s *_percolateQuery) Name(name string) *_percolateQuery { + + s.v.Name = &name + + return s +} + +// Preference used to fetch document to percolate. +func (s *_percolateQuery) Preference(preference string) *_percolateQuery { + + s.v.Preference = &preference + + return s +} + +func (s *_percolateQuery) QueryName_(queryname_ string) *_percolateQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Routing used to fetch document to percolate. 
+func (s *_percolateQuery) Routing(routing string) *_percolateQuery { + + s.v.Routing = &routing + + return s +} + +// The expected version of a stored document to percolate. +func (s *_percolateQuery) Version(versionnumber int64) *_percolateQuery { + + s.v.Version = &versionnumber + + return s +} + +func (s *_percolateQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Percolate = s.v + + return container +} + +func (s *_percolateQuery) PercolateQueryCaster() *types.PercolateQuery { + return s.v +} diff --git a/typedapi/esdsl/percolatorproperty.go b/typedapi/esdsl/percolatorproperty.go new file mode 100644 index 0000000000..1585d11982 --- /dev/null +++ b/typedapi/esdsl/percolatorproperty.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _percolatorProperty struct { + v *types.PercolatorProperty +} + +func NewPercolatorProperty() *_percolatorProperty { + + return &_percolatorProperty{v: types.NewPercolatorProperty()} + +} + +func (s *_percolatorProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_percolatorProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_percolatorProperty) Fields(fields map[string]types.Property) *_percolatorProperty { + + s.v.Fields = fields + return s +} + +func (s *_percolatorProperty) AddField(key string, value types.PropertyVariant) *_percolatorProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_percolatorProperty) IgnoreAbove(ignoreabove int) *_percolatorProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_percolatorProperty) Meta(meta map[string]string) *_percolatorProperty { + + s.v.Meta = meta + return s +} + +func (s *_percolatorProperty) AddMeta(key string, value string) *_percolatorProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_percolatorProperty) Properties(properties map[string]types.Property) *_percolatorProperty { + + s.v.Properties = properties + return s +} + +func (s *_percolatorProperty) AddProperty(key string, value types.PropertyVariant) *_percolatorProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_percolatorProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_percolatorProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_percolatorProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_percolatorProperty) PercolatorPropertyCaster() *types.PercolatorProperty { + return s.v +} diff --git a/typedapi/esdsl/perpartitioncategorization.go b/typedapi/esdsl/perpartitioncategorization.go new file mode 100644 index 0000000000..fde7c0ab89 --- /dev/null +++ b/typedapi/esdsl/perpartitioncategorization.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _perPartitionCategorization struct { + v *types.PerPartitionCategorization +} + +func NewPerPartitionCategorization() *_perPartitionCategorization { + + return &_perPartitionCategorization{v: types.NewPerPartitionCategorization()} + +} + +// To enable this setting, you must also set the `partition_field_name` property +// to the same value in every detector that uses the keyword `mlcategory`. +// Otherwise, job creation fails. +func (s *_perPartitionCategorization) Enabled(enabled bool) *_perPartitionCategorization { + + s.v.Enabled = &enabled + + return s +} + +// This setting can be set to true only if per-partition categorization is +// enabled. If true, both categorization and subsequent anomaly detection stops +// for partitions where the categorization status changes to warn. This setting +// makes it viable to have a job where it is expected that categorization works +// well for some partitions but not others; you do not pay the cost of bad +// categorization forever in the partitions where it works badly. 
+func (s *_perPartitionCategorization) StopOnWarn(stoponwarn bool) *_perPartitionCategorization { + + s.v.StopOnWarn = &stoponwarn + + return s +} + +func (s *_perPartitionCategorization) PerPartitionCategorizationCaster() *types.PerPartitionCategorization { + return s.v +} diff --git a/typedapi/esdsl/persiananalyzer.go b/typedapi/esdsl/persiananalyzer.go new file mode 100644 index 0000000000..b70ec27627 --- /dev/null +++ b/typedapi/esdsl/persiananalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _persianAnalyzer struct { + v *types.PersianAnalyzer +} + +func NewPersianAnalyzer() *_persianAnalyzer { + + return &_persianAnalyzer{v: types.NewPersianAnalyzer()} + +} + +func (s *_persianAnalyzer) Stopwords(stopwords ...string) *_persianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_persianAnalyzer) StopwordsPath(stopwordspath string) *_persianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_persianAnalyzer) PersianAnalyzerCaster() *types.PersianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/phase.go b/typedapi/esdsl/phase.go new file mode 100644 index 0000000000..cc636e1370 --- /dev/null +++ b/typedapi/esdsl/phase.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _phase struct { + v *types.Phase +} + +func NewPhase() *_phase { + + return &_phase{v: types.NewPhase()} + +} + +func (s *_phase) Actions(actions types.IlmActionsVariant) *_phase { + + s.v.Actions = actions.IlmActionsCaster() + + return s +} + +func (s *_phase) MinAge(duration types.DurationVariant) *_phase { + + s.v.MinAge = *duration.DurationCaster() + + return s +} + +func (s *_phase) PhaseCaster() *types.Phase { + return s.v +} diff --git a/typedapi/esdsl/phases.go b/typedapi/esdsl/phases.go new file mode 100644 index 0000000000..55b728b812 --- /dev/null +++ b/typedapi/esdsl/phases.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _phases struct { + v *types.Phases +} + +func NewPhases() *_phases { + + return &_phases{v: types.NewPhases()} + +} + +func (s *_phases) Cold(cold types.PhaseVariant) *_phases { + + s.v.Cold = cold.PhaseCaster() + + return s +} + +func (s *_phases) Delete(delete types.PhaseVariant) *_phases { + + s.v.Delete = delete.PhaseCaster() + + return s +} + +func (s *_phases) Frozen(frozen types.PhaseVariant) *_phases { + + s.v.Frozen = frozen.PhaseCaster() + + return s +} + +func (s *_phases) Hot(hot types.PhaseVariant) *_phases { + + s.v.Hot = hot.PhaseCaster() + + return s +} + +func (s *_phases) Warm(warm types.PhaseVariant) *_phases { + + s.v.Warm = warm.PhaseCaster() + + return s +} + +func (s *_phases) PhasesCaster() *types.Phases { + return s.v +} diff --git a/typedapi/esdsl/phonetictokenfilter.go b/typedapi/esdsl/phonetictokenfilter.go new file mode 100644 index 0000000000..7f20dbbb8f --- /dev/null +++ b/typedapi/esdsl/phonetictokenfilter.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype" +) + +type _phoneticTokenFilter struct { + v *types.PhoneticTokenFilter +} + +func NewPhoneticTokenFilter(encoder phoneticencoder.PhoneticEncoder) *_phoneticTokenFilter { + + tmp := &_phoneticTokenFilter{v: types.NewPhoneticTokenFilter()} + + tmp.Encoder(encoder) + + return tmp + +} + +func (s *_phoneticTokenFilter) Encoder(encoder phoneticencoder.PhoneticEncoder) *_phoneticTokenFilter { + + s.v.Encoder = encoder + return s +} + +func (s *_phoneticTokenFilter) Languageset(languagesets ...phoneticlanguage.PhoneticLanguage) *_phoneticTokenFilter { + + s.v.Languageset = make([]phoneticlanguage.PhoneticLanguage, len(languagesets)) + s.v.Languageset = languagesets + + return s +} + +func (s *_phoneticTokenFilter) MaxCodeLen(maxcodelen int) *_phoneticTokenFilter { + + s.v.MaxCodeLen = &maxcodelen + + return s +} + +func (s *_phoneticTokenFilter) NameType(nametype phoneticnametype.PhoneticNameType) *_phoneticTokenFilter { + + s.v.NameType = &nametype + return s +} + +func (s *_phoneticTokenFilter) Replace(replace bool) *_phoneticTokenFilter { + + s.v.Replace = &replace + + return s +} + +func (s *_phoneticTokenFilter) RuleType(ruletype phoneticruletype.PhoneticRuleType) *_phoneticTokenFilter { + + s.v.RuleType = &ruletype + return s +} + +func (s *_phoneticTokenFilter) Version(versionstring string) *_phoneticTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s 
*_phoneticTokenFilter) PhoneticTokenFilterCaster() *types.PhoneticTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/phrasesuggestcollate.go b/typedapi/esdsl/phrasesuggestcollate.go new file mode 100644 index 0000000000..69c8668b99 --- /dev/null +++ b/typedapi/esdsl/phrasesuggestcollate.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _phraseSuggestCollate struct { + v *types.PhraseSuggestCollate +} + +func NewPhraseSuggestCollate(query types.PhraseSuggestCollateQueryVariant) *_phraseSuggestCollate { + + tmp := &_phraseSuggestCollate{v: types.NewPhraseSuggestCollate()} + + tmp.Query(query) + + return tmp + +} + +// Parameters to use if the query is templated. 
+func (s *_phraseSuggestCollate) Params(params map[string]json.RawMessage) *_phraseSuggestCollate { + + s.v.Params = params + return s +} + +func (s *_phraseSuggestCollate) AddParam(key string, value json.RawMessage) *_phraseSuggestCollate { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// Returns all suggestions with an extra `collate_match` option indicating +// whether the generated phrase matched any document. +func (s *_phraseSuggestCollate) Prune(prune bool) *_phraseSuggestCollate { + + s.v.Prune = &prune + + return s +} + +// A collate query that is run once for every suggestion. +func (s *_phraseSuggestCollate) Query(query types.PhraseSuggestCollateQueryVariant) *_phraseSuggestCollate { + + s.v.Query = *query.PhraseSuggestCollateQueryCaster() + + return s +} + +func (s *_phraseSuggestCollate) PhraseSuggestCollateCaster() *types.PhraseSuggestCollate { + return s.v +} diff --git a/typedapi/esdsl/phrasesuggestcollatequery.go b/typedapi/esdsl/phrasesuggestcollatequery.go new file mode 100644 index 0000000000..9b0d6c2d0e --- /dev/null +++ b/typedapi/esdsl/phrasesuggestcollatequery.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _phraseSuggestCollateQuery struct { + v *types.PhraseSuggestCollateQuery +} + +func NewPhraseSuggestCollateQuery() *_phraseSuggestCollateQuery { + + return &_phraseSuggestCollateQuery{v: types.NewPhraseSuggestCollateQuery()} + +} + +// The search template ID. +func (s *_phraseSuggestCollateQuery) Id(id string) *_phraseSuggestCollateQuery { + + s.v.Id = &id + + return s +} + +// The query source. +func (s *_phraseSuggestCollateQuery) Source(source string) *_phraseSuggestCollateQuery { + + s.v.Source = &source + + return s +} + +func (s *_phraseSuggestCollateQuery) PhraseSuggestCollateQueryCaster() *types.PhraseSuggestCollateQuery { + return s.v +} diff --git a/typedapi/esdsl/phrasesuggester.go b/typedapi/esdsl/phrasesuggester.go new file mode 100644 index 0000000000..7e2f05323f --- /dev/null +++ b/typedapi/esdsl/phrasesuggester.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _phraseSuggester struct { + v *types.PhraseSuggester +} + +// Provides access to word alternatives on a per token basis within a certain +// string distance. +func NewPhraseSuggester() *_phraseSuggester { + + return &_phraseSuggester{v: types.NewPhraseSuggester()} + +} + +// The analyzer to analyze the suggest text with. +// Defaults to the search analyzer of the suggest field. +func (s *_phraseSuggester) Analyzer(analyzer string) *_phraseSuggester { + + s.v.Analyzer = &analyzer + + return s +} + +// Checks each suggestion against the specified query to prune suggestions for +// which no matching docs exist in the index. +func (s *_phraseSuggester) Collate(collate types.PhraseSuggestCollateVariant) *_phraseSuggester { + + s.v.Collate = collate.PhraseSuggestCollateCaster() + + return s +} + +// Defines a factor applied to the input phrases score, which is used as a +// threshold for other suggest candidates. +// Only candidates that score higher than the threshold will be included in the +// result. +func (s *_phraseSuggester) Confidence(confidence types.Float64) *_phraseSuggester { + + s.v.Confidence = &confidence + + return s +} + +// A list of candidate generators that produce a list of possible terms per term +// in the given text. +func (s *_phraseSuggester) DirectGenerator(directgenerators ...types.DirectGeneratorVariant) *_phraseSuggester { + + for _, v := range directgenerators { + + s.v.DirectGenerator = append(s.v.DirectGenerator, *v.DirectGeneratorCaster()) + + } + return s +} + +// The field to fetch the candidate suggestions from. +// Needs to be set globally or per suggestion. 
+func (s *_phraseSuggester) Field(field string) *_phraseSuggester { + + s.v.Field = field + + return s +} + +func (s *_phraseSuggester) ForceUnigrams(forceunigrams bool) *_phraseSuggester { + + s.v.ForceUnigrams = &forceunigrams + + return s +} + +// Sets max size of the n-grams (shingles) in the field. +// If the field doesn’t contain n-grams (shingles), this should be omitted or +// set to `1`. +// If the field uses a shingle filter, the `gram_size` is set to the +// `max_shingle_size` if not explicitly set. +func (s *_phraseSuggester) GramSize(gramsize int) *_phraseSuggester { + + s.v.GramSize = &gramsize + + return s +} + +// Sets up suggestion highlighting. +// If not provided, no highlighted field is returned. +func (s *_phraseSuggester) Highlight(highlight types.PhraseSuggestHighlightVariant) *_phraseSuggester { + + s.v.Highlight = highlight.PhraseSuggestHighlightCaster() + + return s +} + +// The maximum percentage of the terms considered to be misspellings in order to +// form a correction. +// This method accepts a float value in the range `[0..1)` as a fraction of the +// actual query terms or a number `>=1` as an absolute number of query terms. +func (s *_phraseSuggester) MaxErrors(maxerrors types.Float64) *_phraseSuggester { + + s.v.MaxErrors = &maxerrors + + return s +} + +// The likelihood of a term being misspelled even if the term exists in the +// dictionary. +func (s *_phraseSuggester) RealWordErrorLikelihood(realworderrorlikelihood types.Float64) *_phraseSuggester { + + s.v.RealWordErrorLikelihood = &realworderrorlikelihood + + return s +} + +// The separator that is used to separate terms in the bigram field. +// If not set, the whitespace character is used as a separator. +func (s *_phraseSuggester) Separator(separator string) *_phraseSuggester { + + s.v.Separator = &separator + + return s +} + +// Sets the maximum number of suggested terms to be retrieved from each +// individual shard. 
+func (s *_phraseSuggester) ShardSize(shardsize int) *_phraseSuggester { + + s.v.ShardSize = &shardsize + + return s +} + +// The maximum corrections to be returned per suggest text token. +func (s *_phraseSuggester) Size(size int) *_phraseSuggester { + + s.v.Size = &size + + return s +} + +// The smoothing model used to balance weight between infrequent grams (grams +// (shingles) are not existing in the index) and frequent grams (appear at least +// once in the index). +// The default model is Stupid Backoff. +func (s *_phraseSuggester) Smoothing(smoothing types.SmoothingModelContainerVariant) *_phraseSuggester { + + s.v.Smoothing = smoothing.SmoothingModelContainerCaster() + + return s +} + +// The text/query to provide suggestions for. +func (s *_phraseSuggester) Text(text string) *_phraseSuggester { + + s.v.Text = &text + + return s +} + +func (s *_phraseSuggester) TokenLimit(tokenlimit int) *_phraseSuggester { + + s.v.TokenLimit = &tokenlimit + + return s +} + +func (s *_phraseSuggester) FieldSuggesterCaster() *types.FieldSuggester { + container := types.NewFieldSuggester() + + container.Phrase = s.v + + return container +} + +func (s *_phraseSuggester) PhraseSuggesterCaster() *types.PhraseSuggester { + return s.v +} diff --git a/typedapi/esdsl/phrasesuggesthighlight.go b/typedapi/esdsl/phrasesuggesthighlight.go new file mode 100644 index 0000000000..27d09145c2 --- /dev/null +++ b/typedapi/esdsl/phrasesuggesthighlight.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _phraseSuggestHighlight struct { + v *types.PhraseSuggestHighlight +} + +func NewPhraseSuggestHighlight(posttag string, pretag string) *_phraseSuggestHighlight { + + tmp := &_phraseSuggestHighlight{v: types.NewPhraseSuggestHighlight()} + + tmp.PostTag(posttag) + + tmp.PreTag(pretag) + + return tmp + +} + +// Use in conjunction with `pre_tag` to define the HTML tags to use for the +// highlighted text. +func (s *_phraseSuggestHighlight) PostTag(posttag string) *_phraseSuggestHighlight { + + s.v.PostTag = posttag + + return s +} + +// Use in conjunction with `post_tag` to define the HTML tags to use for the +// highlighted text. +func (s *_phraseSuggestHighlight) PreTag(pretag string) *_phraseSuggestHighlight { + + s.v.PreTag = pretag + + return s +} + +func (s *_phraseSuggestHighlight) PhraseSuggestHighlightCaster() *types.PhraseSuggestHighlight { + return s.v +} diff --git a/typedapi/esdsl/pinneddoc.go b/typedapi/esdsl/pinneddoc.go new file mode 100644 index 0000000000..e2b34d0cb1 --- /dev/null +++ b/typedapi/esdsl/pinneddoc.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pinnedDoc struct { + v *types.PinnedDoc +} + +// Documents listed in the order they are to appear in results. +// Required if `ids` is not specified. +func NewPinnedDoc() *_pinnedDoc { + + return &_pinnedDoc{v: types.NewPinnedDoc()} + +} + +// The unique document ID. +func (s *_pinnedDoc) Id_(id string) *_pinnedDoc { + + s.v.Id_ = id + + return s +} + +// The index that contains the document. +func (s *_pinnedDoc) Index_(indexname string) *_pinnedDoc { + + s.v.Index_ = &indexname + + return s +} + +func (s *_pinnedDoc) PinnedQueryCaster() *types.PinnedQuery { + container := types.NewPinnedQuery() + + container.Docs = append(container.Docs, *s.v) + + return container +} + +func (s *_pinnedDoc) PinnedDocCaster() *types.PinnedDoc { + return s.v +} diff --git a/typedapi/esdsl/pinnedquery.go b/typedapi/esdsl/pinnedquery.go new file mode 100644 index 0000000000..09d175fd2c --- /dev/null +++ b/typedapi/esdsl/pinnedquery.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _pinnedQuery struct { + v *types.PinnedQuery +} + +func NewPinnedQuery() *_pinnedQuery { + return &_pinnedQuery{v: types.NewPinnedQuery()} +} + +// AdditionalPinnedQueryProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_pinnedQuery) AdditionalPinnedQueryProperty(key string, value json.RawMessage) *_pinnedQuery { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalPinnedQueryProperty = tmp + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_pinnedQuery) Boost(boost float32) *_pinnedQuery { + + s.v.Boost = &boost + + return s +} + +// Documents listed in the order they are to appear in results. +// Required if `ids` is not specified.
+func (s *_pinnedQuery) Docs(docs ...types.PinnedDocVariant) *_pinnedQuery { + + for _, v := range docs { + + s.v.Docs = append(s.v.Docs, *v.PinnedDocCaster()) + + } + return s +} + +// Document IDs listed in the order they are to appear in results. +// Required if `docs` is not specified. +func (s *_pinnedQuery) Ids(ids ...string) *_pinnedQuery { + + for _, v := range ids { + + s.v.Ids = append(s.v.Ids, v) + + } + return s +} + +// Any choice of query used to rank documents which will be ranked below the +// "pinned" documents. +func (s *_pinnedQuery) Organic(organic types.QueryVariant) *_pinnedQuery { + + s.v.Organic = *organic.QueryCaster() + + return s +} + +func (s *_pinnedQuery) QueryName_(queryname_ string) *_pinnedQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_pinnedQuery) PinnedQueryCaster() *types.PinnedQuery { + return s.v +} diff --git a/typedapi/esdsl/pipelinemetadata.go b/typedapi/esdsl/pipelinemetadata.go new file mode 100644 index 0000000000..b6e89536a7 --- /dev/null +++ b/typedapi/esdsl/pipelinemetadata.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pipelineMetadata struct { + v *types.PipelineMetadata +} + +func NewPipelineMetadata(type_ string, version string) *_pipelineMetadata { + + tmp := &_pipelineMetadata{v: types.NewPipelineMetadata()} + + tmp.Type(type_) + + tmp.Version(version) + + return tmp + +} + +func (s *_pipelineMetadata) Type(type_ string) *_pipelineMetadata { + + s.v.Type = type_ + + return s +} + +func (s *_pipelineMetadata) Version(version string) *_pipelineMetadata { + + s.v.Version = version + + return s +} + +func (s *_pipelineMetadata) PipelineMetadataCaster() *types.PipelineMetadata { + return s.v +} diff --git a/typedapi/esdsl/pipelineprocessor.go b/typedapi/esdsl/pipelineprocessor.go new file mode 100644 index 0000000000..be19b971b6 --- /dev/null +++ b/typedapi/esdsl/pipelineprocessor.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pipelineProcessor struct { + v *types.PipelineProcessor +} + +// Executes another pipeline. +func NewPipelineProcessor() *_pipelineProcessor { + + return &_pipelineProcessor{v: types.NewPipelineProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_pipelineProcessor) Description(description string) *_pipelineProcessor { + + s.v.Description = &description + + return s +} + +// Conditionally execute the processor. +func (s *_pipelineProcessor) If(if_ types.ScriptVariant) *_pipelineProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_pipelineProcessor) IgnoreFailure(ignorefailure bool) *_pipelineProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Whether to ignore missing pipelines instead of failing. +func (s *_pipelineProcessor) IgnoreMissingPipeline(ignoremissingpipeline bool) *_pipelineProcessor { + + s.v.IgnoreMissingPipeline = &ignoremissingpipeline + + return s +} + +// The name of the pipeline to execute. +// Supports template snippets. +func (s *_pipelineProcessor) Name(name string) *_pipelineProcessor { + + s.v.Name = name + + return s +} + +// Handle failures for the processor. +func (s *_pipelineProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_pipelineProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_pipelineProcessor) Tag(tag string) *_pipelineProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_pipelineProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Pipeline = s.v + + return container +} + +func (s *_pipelineProcessor) PipelineProcessorCaster() *types.PipelineProcessor { + return s.v +} diff --git a/typedapi/esdsl/pipelinesettings.go b/typedapi/esdsl/pipelinesettings.go new file mode 100644 index 0000000000..fd160c6401 --- /dev/null +++ b/typedapi/esdsl/pipelinesettings.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pipelineSettings struct { + v *types.PipelineSettings +} + +func NewPipelineSettings(pipelinebatchdelay int, pipelinebatchsize int, pipelineworkers int, queuecheckpointwrites int, queuemaxbytesnumber int, queuemaxbytesunits string, queuetype string) *_pipelineSettings { + + tmp := &_pipelineSettings{v: types.NewPipelineSettings()} + + tmp.PipelineBatchDelay(pipelinebatchdelay) + + tmp.PipelineBatchSize(pipelinebatchsize) + + tmp.PipelineWorkers(pipelineworkers) + + tmp.QueueCheckpointWrites(queuecheckpointwrites) + + tmp.QueueMaxBytesNumber(queuemaxbytesnumber) + + tmp.QueueMaxBytesUnits(queuemaxbytesunits) + + tmp.QueueType(queuetype) + + return tmp + +} + +// When creating pipeline event batches, how long in milliseconds to wait for +// each event before dispatching an undersized batch to pipeline workers. +func (s *_pipelineSettings) PipelineBatchDelay(pipelinebatchdelay int) *_pipelineSettings { + + s.v.PipelineBatchDelay = pipelinebatchdelay + + return s +} + +// The maximum number of events an individual worker thread will collect from +// inputs before attempting to execute its filters and outputs. +func (s *_pipelineSettings) PipelineBatchSize(pipelinebatchsize int) *_pipelineSettings { + + s.v.PipelineBatchSize = pipelinebatchsize + + return s +} + +// The number of workers that will, in parallel, execute the filter and output +// stages of the pipeline. +func (s *_pipelineSettings) PipelineWorkers(pipelineworkers int) *_pipelineSettings { + + s.v.PipelineWorkers = pipelineworkers + + return s +} + +// The maximum number of written events before forcing a checkpoint when +// persistent queues are enabled (`queue.type: persisted`). 
+func (s *_pipelineSettings) QueueCheckpointWrites(queuecheckpointwrites int) *_pipelineSettings { + + s.v.QueueCheckpointWrites = queuecheckpointwrites + + return s +} + +// The total capacity of the queue (`queue.type: persisted`) in number of bytes. +func (s *_pipelineSettings) QueueMaxBytesNumber(queuemaxbytesnumber int) *_pipelineSettings { + + s.v.QueueMaxBytesNumber = queuemaxbytesnumber + + return s +} + +// The total capacity of the queue (`queue.type: persisted`) in terms of units +// of bytes. +func (s *_pipelineSettings) QueueMaxBytesUnits(queuemaxbytesunits string) *_pipelineSettings { + + s.v.QueueMaxBytesUnits = queuemaxbytesunits + + return s +} + +// The internal queuing model to use for event buffering. +func (s *_pipelineSettings) QueueType(queuetype string) *_pipelineSettings { + + s.v.QueueType = queuetype + + return s +} + +func (s *_pipelineSettings) PipelineSettingsCaster() *types.PipelineSettings { + return s.v +} diff --git a/typedapi/esdsl/pipeseparatedflagssimplequerystringflag.go b/typedapi/esdsl/pipeseparatedflagssimplequerystringflag.go new file mode 100644 index 0000000000..405d962f2b --- /dev/null +++ b/typedapi/esdsl/pipeseparatedflagssimplequerystringflag.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag" +) + +// This is provide all the types that are part of the union. +type _pipeSeparatedFlagsSimpleQueryStringFlag struct { + v types.PipeSeparatedFlagsSimpleQueryStringFlag +} + +func NewPipeSeparatedFlagsSimpleQueryStringFlag() *_pipeSeparatedFlagsSimpleQueryStringFlag { + return &_pipeSeparatedFlagsSimpleQueryStringFlag{v: nil} +} + +func (u *_pipeSeparatedFlagsSimpleQueryStringFlag) SimpleQueryStringFlag(simplequerystringflag simplequerystringflag.SimpleQueryStringFlag) *_pipeSeparatedFlagsSimpleQueryStringFlag { + + u.v = &simplequerystringflag + return u +} + +func (u *_pipeSeparatedFlagsSimpleQueryStringFlag) String(string string) *_pipeSeparatedFlagsSimpleQueryStringFlag { + + u.v = &string + + return u +} + +func (u *_pipeSeparatedFlagsSimpleQueryStringFlag) PipeSeparatedFlagsSimpleQueryStringFlagCaster() *types.PipeSeparatedFlagsSimpleQueryStringFlag { + return &u.v +} diff --git a/typedapi/esdsl/pivot.go b/typedapi/esdsl/pivot.go new file mode 100644 index 0000000000..8ed0efd27b --- /dev/null +++ b/typedapi/esdsl/pivot.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pivot struct { + v *types.Pivot +} + +func NewPivot() *_pivot { + + return &_pivot{v: types.NewPivot()} + +} + +// Defines how to aggregate the grouped data. The following aggregations are +// currently supported: average, bucket +// script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo +// line, max, median absolute deviation, +// min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, +// top metrics, value count, weighted +// average. +func (s *_pivot) Aggregations(aggregations map[string]types.Aggregations) *_pivot { + + s.v.Aggregations = aggregations + return s +} + +func (s *_pivot) AddAggregation(key string, value types.AggregationsVariant) *_pivot { + + var tmp map[string]types.Aggregations + if s.v.Aggregations == nil { + s.v.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = s.v.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + s.v.Aggregations = tmp + return s +} + +// Defines how to group the data. More than one grouping can be defined per +// pivot. The following groupings are +// currently supported: date histogram, geotile grid, histogram, terms. 
+func (s *_pivot) GroupBy(groupby map[string]types.PivotGroupByContainer) *_pivot { + + s.v.GroupBy = groupby + return s +} + +func (s *_pivot) AddGroupBy(key string, value types.PivotGroupByContainerVariant) *_pivot { + + var tmp map[string]types.PivotGroupByContainer + if s.v.GroupBy == nil { + s.v.GroupBy = make(map[string]types.PivotGroupByContainer) + } else { + tmp = s.v.GroupBy + } + + tmp[key] = *value.PivotGroupByContainerCaster() + + s.v.GroupBy = tmp + return s +} + +func (s *_pivot) PivotCaster() *types.Pivot { + return s.v +} diff --git a/typedapi/esdsl/pivotgroupbycontainer.go b/typedapi/esdsl/pivotgroupbycontainer.go new file mode 100644 index 0000000000..cc20a4e78a --- /dev/null +++ b/typedapi/esdsl/pivotgroupbycontainer.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _pivotGroupByContainer struct { + v *types.PivotGroupByContainer +} + +func NewPivotGroupByContainer() *_pivotGroupByContainer { + return &_pivotGroupByContainer{v: types.NewPivotGroupByContainer()} +} + +// AdditionalPivotGroupByContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_pivotGroupByContainer) AdditionalPivotGroupByContainerProperty(key string, value json.RawMessage) *_pivotGroupByContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalPivotGroupByContainerProperty = tmp + return s +} + +func (s *_pivotGroupByContainer) DateHistogram(datehistogram types.DateHistogramAggregationVariant) *_pivotGroupByContainer { + + s.v.DateHistogram = datehistogram.DateHistogramAggregationCaster() + + return s +} + +func (s *_pivotGroupByContainer) GeotileGrid(geotilegrid types.GeoTileGridAggregationVariant) *_pivotGroupByContainer { + + s.v.GeotileGrid = geotilegrid.GeoTileGridAggregationCaster() + + return s +} + +func (s *_pivotGroupByContainer) Histogram(histogram types.HistogramAggregationVariant) *_pivotGroupByContainer { + + s.v.Histogram = histogram.HistogramAggregationCaster() + + return s +} + +func (s *_pivotGroupByContainer) Terms(terms types.TermsAggregationVariant) *_pivotGroupByContainer { + + s.v.Terms = terms.TermsAggregationCaster() + + return s +} + +func (s *_pivotGroupByContainer) PivotGroupByContainerCaster() *types.PivotGroupByContainer { + return s.v +} diff --git a/typedapi/esdsl/pointintimereference.go b/typedapi/esdsl/pointintimereference.go new file mode 100644 index 0000000000..6a747685e0 --- /dev/null +++ b/typedapi/esdsl/pointintimereference.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _pointInTimeReference struct { + v *types.PointInTimeReference +} + +func NewPointInTimeReference() *_pointInTimeReference { + + return &_pointInTimeReference{v: types.NewPointInTimeReference()} + +} + +func (s *_pointInTimeReference) Id(id string) *_pointInTimeReference { + + s.v.Id = id + + return s +} + +func (s *_pointInTimeReference) KeepAlive(duration types.DurationVariant) *_pointInTimeReference { + + s.v.KeepAlive = *duration.DurationCaster() + + return s +} + +func (s *_pointInTimeReference) PointInTimeReferenceCaster() *types.PointInTimeReference { + return s.v +} diff --git a/typedapi/esdsl/pointproperty.go b/typedapi/esdsl/pointproperty.go new file mode 100644 index 0000000000..6c0abb37bc --- /dev/null +++ b/typedapi/esdsl/pointproperty.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _pointProperty struct { + v *types.PointProperty +} + +func NewPointProperty() *_pointProperty { + + return &_pointProperty{v: types.NewPointProperty()} + +} + +func (s *_pointProperty) CopyTo(fields ...string) *_pointProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_pointProperty) DocValues(docvalues bool) *_pointProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_pointProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_pointProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_pointProperty) Fields(fields map[string]types.Property) *_pointProperty { + + s.v.Fields = fields + return s +} + +func (s *_pointProperty) AddField(key string, value types.PropertyVariant) *_pointProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = 
make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_pointProperty) IgnoreAbove(ignoreabove int) *_pointProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_pointProperty) IgnoreMalformed(ignoremalformed bool) *_pointProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_pointProperty) IgnoreZValue(ignorezvalue bool) *_pointProperty { + + s.v.IgnoreZValue = &ignorezvalue + + return s +} + +// Metadata about the field. +func (s *_pointProperty) Meta(meta map[string]string) *_pointProperty { + + s.v.Meta = meta + return s +} + +func (s *_pointProperty) AddMeta(key string, value string) *_pointProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_pointProperty) NullValue(nullvalue string) *_pointProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_pointProperty) Properties(properties map[string]types.Property) *_pointProperty { + + s.v.Properties = properties + return s +} + +func (s *_pointProperty) AddProperty(key string, value types.PropertyVariant) *_pointProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_pointProperty) Store(store bool) *_pointProperty { + + s.v.Store = &store + + return s +} + +func (s *_pointProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_pointProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_pointProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return 
container +} + +func (s *_pointProperty) PointPropertyCaster() *types.PointProperty { + return s.v +} diff --git a/typedapi/esdsl/porterstemtokenfilter.go b/typedapi/esdsl/porterstemtokenfilter.go new file mode 100644 index 0000000000..ca6ff00855 --- /dev/null +++ b/typedapi/esdsl/porterstemtokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _porterStemTokenFilter struct { + v *types.PorterStemTokenFilter +} + +func NewPorterStemTokenFilter() *_porterStemTokenFilter { + + return &_porterStemTokenFilter{v: types.NewPorterStemTokenFilter()} + +} + +func (s *_porterStemTokenFilter) Version(versionstring string) *_porterStemTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_porterStemTokenFilter) PorterStemTokenFilterCaster() *types.PorterStemTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/portugueseanalyzer.go b/typedapi/esdsl/portugueseanalyzer.go new file mode 100644 index 0000000000..2ea006cea5 --- /dev/null +++ b/typedapi/esdsl/portugueseanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _portugueseAnalyzer struct { + v *types.PortugueseAnalyzer +} + +func NewPortugueseAnalyzer() *_portugueseAnalyzer { + + return &_portugueseAnalyzer{v: types.NewPortugueseAnalyzer()} + +} + +func (s *_portugueseAnalyzer) StemExclusion(stemexclusions ...string) *_portugueseAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_portugueseAnalyzer) Stopwords(stopwords ...string) *_portugueseAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_portugueseAnalyzer) StopwordsPath(stopwordspath string) *_portugueseAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_portugueseAnalyzer) PortugueseAnalyzerCaster() *types.PortugueseAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/predicatetokenfilter.go b/typedapi/esdsl/predicatetokenfilter.go new file mode 100644 index 0000000000..d6fd66cd6d --- /dev/null +++ b/typedapi/esdsl/predicatetokenfilter.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _predicateTokenFilter struct { + v *types.PredicateTokenFilter +} + +func NewPredicateTokenFilter(script types.ScriptVariant) *_predicateTokenFilter { + + tmp := &_predicateTokenFilter{v: types.NewPredicateTokenFilter()} + + tmp.Script(script) + + return tmp + +} + +func (s *_predicateTokenFilter) Script(script types.ScriptVariant) *_predicateTokenFilter { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_predicateTokenFilter) Version(versionstring string) *_predicateTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_predicateTokenFilter) PredicateTokenFilterCaster() *types.PredicateTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/prefixquery.go b/typedapi/esdsl/prefixquery.go new file mode 100644 index 0000000000..7e334bc43d --- /dev/null +++ b/typedapi/esdsl/prefixquery.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _prefixQuery struct { + k string + v *types.PrefixQuery +} + +// Returns users that contain a specific prefix in a provided field. +func NewPrefixQuery(field string, value string) *_prefixQuery { + tmp := &_prefixQuery{ + k: field, + v: types.NewPrefixQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_prefixQuery) Boost(boost float32) *_prefixQuery { + + s.v.Boost = &boost + + return s +} + +// Allows ASCII case insensitive matching of the value with the indexed field +// values when set to `true`. +// Default is `false` which means the case sensitivity of matching depends on +// the underlying field’s mapping. +func (s *_prefixQuery) CaseInsensitive(caseinsensitive bool) *_prefixQuery { + + s.v.CaseInsensitive = &caseinsensitive + + return s +} + +func (s *_prefixQuery) QueryName_(queryname_ string) *_prefixQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Method used to rewrite the query. +func (s *_prefixQuery) Rewrite(multitermqueryrewrite string) *_prefixQuery { + + s.v.Rewrite = &multitermqueryrewrite + + return s +} + +// Beginning characters of terms you wish to find in the provided field. 
+func (s *_prefixQuery) Value(value string) *_prefixQuery { + + s.v.Value = value + + return s +} + +func (s *_prefixQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Prefix = map[string]types.PrefixQuery{ + s.k: *s.v, + } + return container +} + +func (s *_prefixQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Prefix = map[string]types.PrefixQuery{ + s.k: *s.v, + } + return container +} + +func (s *_prefixQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Prefix = map[string]types.PrefixQuery{ + s.k: *s.v, + } + return container +} + +func (s *_prefixQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Prefix = map[string]types.PrefixQuery{ + s.k: *s.v, + } + return container +} + +// NewSinglePrefixQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSinglePrefixQuery() *_prefixQuery { + return &_prefixQuery{ + k: "", + v: types.NewPrefixQuery(), + } +} + +func (s *_prefixQuery) PrefixQueryCaster() *types.PrefixQuery { + return s.v.PrefixQueryCaster() +} diff --git a/typedapi/esdsl/preprocessor.go b/typedapi/esdsl/preprocessor.go new file mode 100644 index 0000000000..1a24ff987f --- /dev/null +++ b/typedapi/esdsl/preprocessor.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _preprocessor struct { + v *types.Preprocessor +} + +func NewPreprocessor() *_preprocessor { + return &_preprocessor{v: types.NewPreprocessor()} +} + +// AdditionalPreprocessorProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_preprocessor) AdditionalPreprocessorProperty(key string, value json.RawMessage) *_preprocessor { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalPreprocessorProperty = tmp + return s +} + +func (s *_preprocessor) FrequencyEncoding(frequencyencoding types.FrequencyEncodingPreprocessorVariant) *_preprocessor { + + s.v.FrequencyEncoding = frequencyencoding.FrequencyEncodingPreprocessorCaster() + + return s +} + +func (s *_preprocessor) OneHotEncoding(onehotencoding types.OneHotEncodingPreprocessorVariant) *_preprocessor { + + s.v.OneHotEncoding = onehotencoding.OneHotEncodingPreprocessorCaster() + + return s +} + +func (s *_preprocessor) TargetMeanEncoding(targetmeanencoding types.TargetMeanEncodingPreprocessorVariant) *_preprocessor { + + s.v.TargetMeanEncoding = targetmeanencoding.TargetMeanEncodingPreprocessorCaster() + + return s +} + +func (s *_preprocessor) PreprocessorCaster() *types.Preprocessor { + return s.v +} diff --git 
a/typedapi/esdsl/privilegesactions.go b/typedapi/esdsl/privilegesactions.go new file mode 100644 index 0000000000..721cbdcbe3 --- /dev/null +++ b/typedapi/esdsl/privilegesactions.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _privilegesActions struct { + v *types.PrivilegesActions +} + +func NewPrivilegesActions() *_privilegesActions { + + return &_privilegesActions{v: types.NewPrivilegesActions()} + +} + +func (s *_privilegesActions) Actions(actions ...string) *_privilegesActions { + + for _, v := range actions { + + s.v.Actions = append(s.v.Actions, v) + + } + return s +} + +func (s *_privilegesActions) Application(application string) *_privilegesActions { + + s.v.Application = &application + + return s +} + +func (s *_privilegesActions) Metadata(metadata types.MetadataVariant) *_privilegesActions { + + s.v.Metadata = *metadata.MetadataCaster() + + return s +} + +func (s *_privilegesActions) Name(name string) *_privilegesActions { + + s.v.Name = &name + + return s +} + +func (s *_privilegesActions) PrivilegesActionsCaster() *types.PrivilegesActions { + return s.v +} diff --git a/typedapi/esdsl/privilegescheck.go b/typedapi/esdsl/privilegescheck.go new file mode 100644 index 0000000000..6dbc64b099 --- /dev/null +++ b/typedapi/esdsl/privilegescheck.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" +) + +type _privilegesCheck struct { + v *types.PrivilegesCheck +} + +func NewPrivilegesCheck() *_privilegesCheck { + + return &_privilegesCheck{v: types.NewPrivilegesCheck()} + +} + +func (s *_privilegesCheck) Application(applications ...types.ApplicationPrivilegesCheckVariant) *_privilegesCheck { + + for _, v := range applications { + + s.v.Application = append(s.v.Application, *v.ApplicationPrivilegesCheckCaster()) + + } + return s +} + +// A list of the cluster privileges that you want to check. +func (s *_privilegesCheck) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *_privilegesCheck { + + for _, v := range clusters { + + s.v.Cluster = append(s.v.Cluster, v) + + } + return s +} + +func (s *_privilegesCheck) Index(indices ...types.IndexPrivilegesCheckVariant) *_privilegesCheck { + + for _, v := range indices { + + s.v.Index = append(s.v.Index, *v.IndexPrivilegesCheckCaster()) + + } + return s +} + +func (s *_privilegesCheck) PrivilegesCheckCaster() *types.PrivilegesCheck { + return s.v +} diff --git a/typedapi/esdsl/processorcontainer.go b/typedapi/esdsl/processorcontainer.go new file mode 100644 index 0000000000..cd3b9404ac --- /dev/null +++ b/typedapi/esdsl/processorcontainer.go @@ -0,0 +1,506 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _processorContainer struct { + v *types.ProcessorContainer +} + +func NewProcessorContainer() *_processorContainer { + return &_processorContainer{v: types.NewProcessorContainer()} +} + +// AdditionalProcessorContainerProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_processorContainer) AdditionalProcessorContainerProperty(key string, value json.RawMessage) *_processorContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalProcessorContainerProperty = tmp + return s +} + +// Appends one or more values to an existing array if the field already exists +// and it is an array. +// Converts a scalar to an array and appends one or more values to it if the +// field exists and it is a scalar. +// Creates an array containing the provided values if the field doesn’t exist. +// Accepts a single value or an array of values. 
+func (s *_processorContainer) Append(append types.AppendProcessorVariant) *_processorContainer { + + s.v.Append = append.AppendProcessorCaster() + + return s +} + +// The attachment processor lets Elasticsearch extract file attachments in +// common formats (such as PPT, XLS, and PDF) by using the Apache text +// extraction library Tika. +func (s *_processorContainer) Attachment(attachment types.AttachmentProcessorVariant) *_processorContainer { + + s.v.Attachment = attachment.AttachmentProcessorCaster() + + return s +} + +// Converts a human readable byte value (for example `1kb`) to its value in +// bytes (for example `1024`). +// If the field is an array of strings, all members of the array will be +// converted. +// Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case +// insensitive. +// An error will occur if the field is not a supported format or resultant value +// exceeds 2^63. +func (s *_processorContainer) Bytes(bytes types.BytesProcessorVariant) *_processorContainer { + + s.v.Bytes = bytes.BytesProcessorCaster() + + return s +} + +// Converts circle definitions of shapes to regular polygons which approximate +// them. +func (s *_processorContainer) Circle(circle types.CircleProcessorVariant) *_processorContainer { + + s.v.Circle = circle.CircleProcessorCaster() + + return s +} + +// Computes the Community ID for network flow data as defined in the +// Community ID Specification. You can use a community ID to correlate network +// events related to a single flow. +func (s *_processorContainer) CommunityId(communityid types.CommunityIDProcessorVariant) *_processorContainer { + + s.v.CommunityId = communityid.CommunityIDProcessorCaster() + + return s +} + +// Converts a field in the currently ingested document to a different type, such +// as converting a string to an integer. +// If the field value is an array, all members will be converted. 
+func (s *_processorContainer) Convert(convert types.ConvertProcessorVariant) *_processorContainer { + + s.v.Convert = convert.ConvertProcessorCaster() + + return s +} + +// Extracts fields from CSV line out of a single text field within a document. +// Any empty field in CSV will be skipped. +func (s *_processorContainer) Csv(csv types.CsvProcessorVariant) *_processorContainer { + + s.v.Csv = csv.CsvProcessorCaster() + + return s +} + +// Parses dates from fields, and then uses the date or timestamp as the +// timestamp for the document. +func (s *_processorContainer) Date(date types.DateProcessorVariant) *_processorContainer { + + s.v.Date = date.DateProcessorCaster() + + return s +} + +// The purpose of this processor is to point documents to the right time based +// index based on a date or timestamp field in a document by using the date math +// index name support. +func (s *_processorContainer) DateIndexName(dateindexname types.DateIndexNameProcessorVariant) *_processorContainer { + + s.v.DateIndexName = dateindexname.DateIndexNameProcessorCaster() + + return s +} + +// Extracts structured fields out of a single text field by matching the text +// field against a delimiter-based pattern. +func (s *_processorContainer) Dissect(dissect types.DissectProcessorVariant) *_processorContainer { + + s.v.Dissect = dissect.DissectProcessorCaster() + + return s +} + +// Expands a field with dots into an object field. +// This processor allows fields with dots in the name to be accessible by other +// processors in the pipeline. +// Otherwise these fields can’t be accessed by any processor. +func (s *_processorContainer) DotExpander(dotexpander types.DotExpanderProcessorVariant) *_processorContainer { + + s.v.DotExpander = dotexpander.DotExpanderProcessorCaster() + + return s +} + +// Drops the document without raising any errors. +// This is useful to prevent the document from getting indexed based on some +// condition. 
+func (s *_processorContainer) Drop(drop types.DropProcessorVariant) *_processorContainer { + + s.v.Drop = drop.DropProcessorCaster() + + return s +} + +// The `enrich` processor can enrich documents with data from another index. +func (s *_processorContainer) Enrich(enrich types.EnrichProcessorVariant) *_processorContainer { + + s.v.Enrich = enrich.EnrichProcessorCaster() + + return s +} + +// Raises an exception. +// This is useful for when you expect a pipeline to fail and want to relay a +// specific message to the requester. +func (s *_processorContainer) Fail(fail types.FailProcessorVariant) *_processorContainer { + + s.v.Fail = fail.FailProcessorCaster() + + return s +} + +// Computes a hash of the document’s content. You can use this hash for +// content fingerprinting. +func (s *_processorContainer) Fingerprint(fingerprint types.FingerprintProcessorVariant) *_processorContainer { + + s.v.Fingerprint = fingerprint.FingerprintProcessorCaster() + + return s +} + +// Runs an ingest processor on each element of an array or object. +func (s *_processorContainer) Foreach(foreach types.ForeachProcessorVariant) *_processorContainer { + + s.v.Foreach = foreach.ForeachProcessorCaster() + + return s +} + +// Converts geo-grid definitions of grid tiles or cells to regular bounding +// boxes or polygons which describe their shape. +// This is useful if there is a need to interact with the tile shapes as +// spatially indexable fields. +func (s *_processorContainer) GeoGrid(geogrid types.GeoGridProcessorVariant) *_processorContainer { + + s.v.GeoGrid = geogrid.GeoGridProcessorCaster() + + return s +} + +// The `geoip` processor adds information about the geographical location of an +// IPv4 or IPv6 address. +func (s *_processorContainer) Geoip(geoip types.GeoIpProcessorVariant) *_processorContainer { + + s.v.Geoip = geoip.GeoIpProcessorCaster() + + return s +} + +// Extracts structured fields out of a single text field within a document. 
+// You choose which field to extract matched fields from, as well as the grok +// pattern you expect will match. +// A grok pattern is like a regular expression that supports aliased expressions +// that can be reused. +func (s *_processorContainer) Grok(grok types.GrokProcessorVariant) *_processorContainer { + + s.v.Grok = grok.GrokProcessorCaster() + + return s +} + +// Converts a string field by applying a regular expression and a replacement. +// If the field is an array of string, all members of the array will be +// converted. +// If any non-string values are encountered, the processor will throw an +// exception. +func (s *_processorContainer) Gsub(gsub types.GsubProcessorVariant) *_processorContainer { + + s.v.Gsub = gsub.GsubProcessorCaster() + + return s +} + +// Removes HTML tags from the field. +// If the field is an array of strings, HTML tags will be removed from all +// members of the array. +func (s *_processorContainer) HtmlStrip(htmlstrip types.HtmlStripProcessorVariant) *_processorContainer { + + s.v.HtmlStrip = htmlstrip.HtmlStripProcessorCaster() + + return s +} + +// Uses a pre-trained data frame analytics model or a model deployed for natural +// language processing tasks to infer against the data that is being ingested in +// the pipeline. +func (s *_processorContainer) Inference(inference types.InferenceProcessorVariant) *_processorContainer { + + s.v.Inference = inference.InferenceProcessorCaster() + + return s +} + +// Currently an undocumented alias for GeoIP Processor. +func (s *_processorContainer) IpLocation(iplocation types.IpLocationProcessorVariant) *_processorContainer { + + s.v.IpLocation = iplocation.IpLocationProcessorCaster() + + return s +} + +// Joins each element of an array into a single string using a separator +// character between each element. +// Throws an error when the field is not an array. 
+func (s *_processorContainer) Join(join types.JoinProcessorVariant) *_processorContainer { + + s.v.Join = join.JoinProcessorCaster() + + return s +} + +// Converts a JSON string into a structured JSON object. +func (s *_processorContainer) Json(json types.JsonProcessorVariant) *_processorContainer { + + s.v.Json = json.JsonProcessorCaster() + + return s +} + +// This processor helps automatically parse messages (or specific event fields) +// which are of the `foo=bar` variety. +func (s *_processorContainer) Kv(kv types.KeyValueProcessorVariant) *_processorContainer { + + s.v.Kv = kv.KeyValueProcessorCaster() + + return s +} + +// Converts a string to its lowercase equivalent. +// If the field is an array of strings, all members of the array will be +// converted. +func (s *_processorContainer) Lowercase(lowercase types.LowercaseProcessorVariant) *_processorContainer { + + s.v.Lowercase = lowercase.LowercaseProcessorCaster() + + return s +} + +// Calculates the network direction given a source IP address, destination IP +// address, and a list of internal networks. +func (s *_processorContainer) NetworkDirection(networkdirection types.NetworkDirectionProcessorVariant) *_processorContainer { + + s.v.NetworkDirection = networkdirection.NetworkDirectionProcessorCaster() + + return s +} + +// Executes another pipeline. +func (s *_processorContainer) Pipeline(pipeline types.PipelineProcessorVariant) *_processorContainer { + + s.v.Pipeline = pipeline.PipelineProcessorCaster() + + return s +} + +// The Redact processor uses the Grok rules engine to obscure text in the input +// document matching the given Grok patterns. +// The processor can be used to obscure Personal Identifying Information (PII) +// by configuring it to detect known patterns such as email or IP addresses. +// Text that matches a Grok pattern is replaced with a configurable string such +// as `<EMAIL>` where an email address is matched or simply replace all matches +// with the text `<REDACTED>` if preferred. 
+func (s *_processorContainer) Redact(redact types.RedactProcessorVariant) *_processorContainer { + + s.v.Redact = redact.RedactProcessorCaster() + + return s +} + +// Extracts the registered domain (also known as the effective top-level +// domain or eTLD), sub-domain, and top-level domain from a fully qualified +// domain name (FQDN). Uses the registered domains defined in the Mozilla +// Public Suffix List. +func (s *_processorContainer) RegisteredDomain(registereddomain types.RegisteredDomainProcessorVariant) *_processorContainer { + + s.v.RegisteredDomain = registereddomain.RegisteredDomainProcessorCaster() + + return s +} + +// Removes existing fields. +// If one field doesn’t exist, an exception will be thrown. +func (s *_processorContainer) Remove(remove types.RemoveProcessorVariant) *_processorContainer { + + s.v.Remove = remove.RemoveProcessorCaster() + + return s +} + +// Renames an existing field. +// If the field doesn’t exist or the new name is already used, an exception will +// be thrown. +func (s *_processorContainer) Rename(rename types.RenameProcessorVariant) *_processorContainer { + + s.v.Rename = rename.RenameProcessorCaster() + + return s +} + +// Routes a document to another target index or data stream. +// When setting the `destination` option, the target is explicitly specified and +// the dataset and namespace options can’t be set. +// When the `destination` option is not set, this processor is in a data stream +// mode. Note that in this mode, the reroute processor can only be used on data +// streams that follow the data stream naming scheme. +func (s *_processorContainer) Reroute(reroute types.RerouteProcessorVariant) *_processorContainer { + + s.v.Reroute = reroute.RerouteProcessorCaster() + + return s +} + +// Runs an inline or stored script on incoming documents. +// The script runs in the `ingest` context. 
+func (s *_processorContainer) Script(script types.ScriptProcessorVariant) *_processorContainer { + + s.v.Script = script.ScriptProcessorCaster() + + return s +} + +// Adds a field with the specified value. +// If the field already exists, its value will be replaced with the provided +// one. +func (s *_processorContainer) Set(set types.SetProcessorVariant) *_processorContainer { + + s.v.Set = set.SetProcessorCaster() + + return s +} + +// Sets user-related details (such as `username`, `roles`, `email`, `full_name`, +// `metadata`, `api_key`, `realm` and `authentication_type`) from the current +// authenticated user to the current document by pre-processing the ingest. +func (s *_processorContainer) SetSecurityUser(setsecurityuser types.SetSecurityUserProcessorVariant) *_processorContainer { + + s.v.SetSecurityUser = setsecurityuser.SetSecurityUserProcessorCaster() + + return s +} + +// Sorts the elements of an array ascending or descending. +// Homogeneous arrays of numbers will be sorted numerically, while arrays of +// strings or heterogeneous arrays of strings + numbers will be sorted +// lexicographically. +// Throws an error when the field is not an array. +func (s *_processorContainer) Sort(sort types.SortProcessorVariant) *_processorContainer { + + s.v.Sort = sort.SortProcessorCaster() + + return s +} + +// Splits a field into an array using a separator character. +// Only works on string fields. +func (s *_processorContainer) Split(split types.SplitProcessorVariant) *_processorContainer { + + s.v.Split = split.SplitProcessorCaster() + + return s +} + +// Terminates the current ingest pipeline, causing no further processors to be +// run. +// This will normally be executed conditionally, using the `if` option. +func (s *_processorContainer) Terminate(terminate types.TerminateProcessorVariant) *_processorContainer { + + s.v.Terminate = terminate.TerminateProcessorCaster() + + return s +} + +// Trims whitespace from a field. 
+// If the field is an array of strings, all members of the array will be +// trimmed. +// This only works on leading and trailing whitespace. +func (s *_processorContainer) Trim(trim types.TrimProcessorVariant) *_processorContainer { + + s.v.Trim = trim.TrimProcessorCaster() + + return s +} + +// Converts a string to its uppercase equivalent. +// If the field is an array of strings, all members of the array will be +// converted. +func (s *_processorContainer) Uppercase(uppercase types.UppercaseProcessorVariant) *_processorContainer { + + s.v.Uppercase = uppercase.UppercaseProcessorCaster() + + return s +} + +// Parses a Uniform Resource Identifier (URI) string and extracts its components +// as an object. +// This URI object includes properties for the URI’s domain, path, fragment, +// port, query, scheme, user info, username, and password. +func (s *_processorContainer) UriParts(uriparts types.UriPartsProcessorVariant) *_processorContainer { + + s.v.UriParts = uriparts.UriPartsProcessorCaster() + + return s +} + +// URL-decodes a string. +// If the field is an array of strings, all members of the array will be +// decoded. +func (s *_processorContainer) Urldecode(urldecode types.UrlDecodeProcessorVariant) *_processorContainer { + + s.v.Urldecode = urldecode.UrlDecodeProcessorCaster() + + return s +} + +// The `user_agent` processor extracts details from the user agent string a +// browser sends with its web requests. +// This processor adds this information by default under the `user_agent` field. 
+func (s *_processorContainer) UserAgent(useragent types.UserAgentProcessorVariant) *_processorContainer { + + s.v.UserAgent = useragent.UserAgentProcessorCaster() + + return s +} + +func (s *_processorContainer) ProcessorContainerCaster() *types.ProcessorContainer { + return s.v +} diff --git a/typedapi/esdsl/property.go b/typedapi/esdsl/property.go new file mode 100644 index 0000000000..18a2280d08 --- /dev/null +++ b/typedapi/esdsl/property.go @@ -0,0 +1,699 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. 
+type _property struct { + v types.Property +} + +func NewProperty() *_property { + return &_property{v: nil} +} + +func (u *_property) BinaryProperty(binaryproperty types.BinaryPropertyVariant) *_property { + + u.v = &binaryproperty + + return u +} + +// Interface implementation for BinaryProperty in Property union +func (u *_binaryProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) BooleanProperty(booleanproperty types.BooleanPropertyVariant) *_property { + + u.v = &booleanproperty + + return u +} + +// Interface implementation for BooleanProperty in Property union +func (u *_booleanProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DynamicProperty(dynamicproperty types.DynamicPropertyVariant) *_property { + + u.v = &dynamicproperty + + return u +} + +// Interface implementation for DynamicProperty in Property union +func (u *_dynamicProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) JoinProperty(joinproperty types.JoinPropertyVariant) *_property { + + u.v = &joinproperty + + return u +} + +// Interface implementation for JoinProperty in Property union +func (u *_joinProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) KeywordProperty(keywordproperty types.KeywordPropertyVariant) *_property { + + u.v = &keywordproperty + + return u +} + +// Interface implementation for KeywordProperty in Property union +func (u *_keywordProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) MatchOnlyTextProperty(matchonlytextproperty types.MatchOnlyTextPropertyVariant) *_property { + + u.v = &matchonlytextproperty + + return u +} + +// Interface implementation for MatchOnlyTextProperty in Property union +func (u *_matchOnlyTextProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) 
+ return &t +} + +func (u *_property) PercolatorProperty(percolatorproperty types.PercolatorPropertyVariant) *_property { + + u.v = &percolatorproperty + + return u +} + +// Interface implementation for PercolatorProperty in Property union +func (u *_percolatorProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) RankFeatureProperty(rankfeatureproperty types.RankFeaturePropertyVariant) *_property { + + u.v = &rankfeatureproperty + + return u +} + +// Interface implementation for RankFeatureProperty in Property union +func (u *_rankFeatureProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) RankFeaturesProperty(rankfeaturesproperty types.RankFeaturesPropertyVariant) *_property { + + u.v = &rankfeaturesproperty + + return u +} + +// Interface implementation for RankFeaturesProperty in Property union +func (u *_rankFeaturesProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) SearchAsYouTypeProperty(searchasyoutypeproperty types.SearchAsYouTypePropertyVariant) *_property { + + u.v = &searchasyoutypeproperty + + return u +} + +// Interface implementation for SearchAsYouTypeProperty in Property union +func (u *_searchAsYouTypeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) TextProperty(textproperty types.TextPropertyVariant) *_property { + + u.v = &textproperty + + return u +} + +// Interface implementation for TextProperty in Property union +func (u *_textProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) VersionProperty(versionproperty types.VersionPropertyVariant) *_property { + + u.v = &versionproperty + + return u +} + +// Interface implementation for VersionProperty in Property union +func (u *_versionProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + 
return &t +} + +func (u *_property) WildcardProperty(wildcardproperty types.WildcardPropertyVariant) *_property { + + u.v = &wildcardproperty + + return u +} + +// Interface implementation for WildcardProperty in Property union +func (u *_wildcardProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DateNanosProperty(datenanosproperty types.DateNanosPropertyVariant) *_property { + + u.v = &datenanosproperty + + return u +} + +// Interface implementation for DateNanosProperty in Property union +func (u *_dateNanosProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DateProperty(dateproperty types.DatePropertyVariant) *_property { + + u.v = &dateproperty + + return u +} + +// Interface implementation for DateProperty in Property union +func (u *_dateProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) AggregateMetricDoubleProperty(aggregatemetricdoubleproperty types.AggregateMetricDoublePropertyVariant) *_property { + + u.v = &aggregatemetricdoubleproperty + + return u +} + +// Interface implementation for AggregateMetricDoubleProperty in Property union +func (u *_aggregateMetricDoubleProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DenseVectorProperty(densevectorproperty types.DenseVectorPropertyVariant) *_property { + + u.v = &densevectorproperty + + return u +} + +// Interface implementation for DenseVectorProperty in Property union +func (u *_denseVectorProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) FlattenedProperty(flattenedproperty types.FlattenedPropertyVariant) *_property { + + u.v = &flattenedproperty + + return u +} + +// Interface implementation for FlattenedProperty in Property union +func (u *_flattenedProperty) PropertyCaster() *types.Property { + t := 
types.Property(u.v) + return &t +} + +func (u *_property) NestedProperty(nestedproperty types.NestedPropertyVariant) *_property { + + u.v = &nestedproperty + + return u +} + +// Interface implementation for NestedProperty in Property union +func (u *_nestedProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ObjectProperty(objectproperty types.ObjectPropertyVariant) *_property { + + u.v = &objectproperty + + return u +} + +// Interface implementation for ObjectProperty in Property union +func (u *_objectProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) PassthroughObjectProperty(passthroughobjectproperty types.PassthroughObjectPropertyVariant) *_property { + + u.v = &passthroughobjectproperty + + return u +} + +// Interface implementation for PassthroughObjectProperty in Property union +func (u *_passthroughObjectProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) SemanticTextProperty(semantictextproperty types.SemanticTextPropertyVariant) *_property { + + u.v = &semantictextproperty + + return u +} + +// Interface implementation for SemanticTextProperty in Property union +func (u *_semanticTextProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) SparseVectorProperty(sparsevectorproperty types.SparseVectorPropertyVariant) *_property { + + u.v = &sparsevectorproperty + + return u +} + +// Interface implementation for SparseVectorProperty in Property union +func (u *_sparseVectorProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) CompletionProperty(completionproperty types.CompletionPropertyVariant) *_property { + + u.v = &completionproperty + + return u +} + +// Interface implementation for CompletionProperty in Property union +func (u *_completionProperty) PropertyCaster() 
*types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ConstantKeywordProperty(constantkeywordproperty types.ConstantKeywordPropertyVariant) *_property { + + u.v = &constantkeywordproperty + + return u +} + +// Interface implementation for ConstantKeywordProperty in Property union +func (u *_constantKeywordProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) CountedKeywordProperty(countedkeywordproperty types.CountedKeywordPropertyVariant) *_property { + + u.v = &countedkeywordproperty + + return u +} + +// Interface implementation for CountedKeywordProperty in Property union +func (u *_countedKeywordProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) FieldAliasProperty(fieldaliasproperty types.FieldAliasPropertyVariant) *_property { + + u.v = &fieldaliasproperty + + return u +} + +// Interface implementation for FieldAliasProperty in Property union +func (u *_fieldAliasProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) HistogramProperty(histogramproperty types.HistogramPropertyVariant) *_property { + + u.v = &histogramproperty + + return u +} + +// Interface implementation for HistogramProperty in Property union +func (u *_histogramProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) IpProperty(ipproperty types.IpPropertyVariant) *_property { + + u.v = &ipproperty + + return u +} + +// Interface implementation for IpProperty in Property union +func (u *_ipProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) Murmur3HashProperty(murmur3hashproperty types.Murmur3HashPropertyVariant) *_property { + + u.v = &murmur3hashproperty + + return u +} + +// Interface implementation for Murmur3HashProperty in Property union +func (u *_murmur3HashProperty) 
PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) TokenCountProperty(tokencountproperty types.TokenCountPropertyVariant) *_property { + + u.v = &tokencountproperty + + return u +} + +// Interface implementation for TokenCountProperty in Property union +func (u *_tokenCountProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) GeoPointProperty(geopointproperty types.GeoPointPropertyVariant) *_property { + + u.v = &geopointproperty + + return u +} + +// Interface implementation for GeoPointProperty in Property union +func (u *_geoPointProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) GeoShapeProperty(geoshapeproperty types.GeoShapePropertyVariant) *_property { + + u.v = &geoshapeproperty + + return u +} + +// Interface implementation for GeoShapeProperty in Property union +func (u *_geoShapeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) PointProperty(pointproperty types.PointPropertyVariant) *_property { + + u.v = &pointproperty + + return u +} + +// Interface implementation for PointProperty in Property union +func (u *_pointProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ShapeProperty(shapeproperty types.ShapePropertyVariant) *_property { + + u.v = &shapeproperty + + return u +} + +// Interface implementation for ShapeProperty in Property union +func (u *_shapeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ByteNumberProperty(bytenumberproperty types.ByteNumberPropertyVariant) *_property { + + u.v = &bytenumberproperty + + return u +} + +// Interface implementation for ByteNumberProperty in Property union +func (u *_byteNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func 
(u *_property) DoubleNumberProperty(doublenumberproperty types.DoubleNumberPropertyVariant) *_property { + + u.v = &doublenumberproperty + + return u +} + +// Interface implementation for DoubleNumberProperty in Property union +func (u *_doubleNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) FloatNumberProperty(floatnumberproperty types.FloatNumberPropertyVariant) *_property { + + u.v = &floatnumberproperty + + return u +} + +// Interface implementation for FloatNumberProperty in Property union +func (u *_floatNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) HalfFloatNumberProperty(halffloatnumberproperty types.HalfFloatNumberPropertyVariant) *_property { + + u.v = &halffloatnumberproperty + + return u +} + +// Interface implementation for HalfFloatNumberProperty in Property union +func (u *_halfFloatNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) IntegerNumberProperty(integernumberproperty types.IntegerNumberPropertyVariant) *_property { + + u.v = &integernumberproperty + + return u +} + +// Interface implementation for IntegerNumberProperty in Property union +func (u *_integerNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) LongNumberProperty(longnumberproperty types.LongNumberPropertyVariant) *_property { + + u.v = &longnumberproperty + + return u +} + +// Interface implementation for LongNumberProperty in Property union +func (u *_longNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ScaledFloatNumberProperty(scaledfloatnumberproperty types.ScaledFloatNumberPropertyVariant) *_property { + + u.v = &scaledfloatnumberproperty + + return u +} + +// Interface implementation for ScaledFloatNumberProperty in Property union +func (u 
*_scaledFloatNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) ShortNumberProperty(shortnumberproperty types.ShortNumberPropertyVariant) *_property { + + u.v = &shortnumberproperty + + return u +} + +// Interface implementation for ShortNumberProperty in Property union +func (u *_shortNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) UnsignedLongNumberProperty(unsignedlongnumberproperty types.UnsignedLongNumberPropertyVariant) *_property { + + u.v = &unsignedlongnumberproperty + + return u +} + +// Interface implementation for UnsignedLongNumberProperty in Property union +func (u *_unsignedLongNumberProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DateRangeProperty(daterangeproperty types.DateRangePropertyVariant) *_property { + + u.v = &daterangeproperty + + return u +} + +// Interface implementation for DateRangeProperty in Property union +func (u *_dateRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) DoubleRangeProperty(doublerangeproperty types.DoubleRangePropertyVariant) *_property { + + u.v = &doublerangeproperty + + return u +} + +// Interface implementation for DoubleRangeProperty in Property union +func (u *_doubleRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) FloatRangeProperty(floatrangeproperty types.FloatRangePropertyVariant) *_property { + + u.v = &floatrangeproperty + + return u +} + +// Interface implementation for FloatRangeProperty in Property union +func (u *_floatRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) IntegerRangeProperty(integerrangeproperty types.IntegerRangePropertyVariant) *_property { + + u.v = &integerrangeproperty + + return u +} + +// 
Interface implementation for IntegerRangeProperty in Property union +func (u *_integerRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) IpRangeProperty(iprangeproperty types.IpRangePropertyVariant) *_property { + + u.v = &iprangeproperty + + return u +} + +// Interface implementation for IpRangeProperty in Property union +func (u *_ipRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) LongRangeProperty(longrangeproperty types.LongRangePropertyVariant) *_property { + + u.v = &longrangeproperty + + return u +} + +// Interface implementation for LongRangeProperty in Property union +func (u *_longRangeProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) IcuCollationProperty(icucollationproperty types.IcuCollationPropertyVariant) *_property { + + u.v = &icucollationproperty + + return u +} + +// Interface implementation for IcuCollationProperty in Property union +func (u *_icuCollationProperty) PropertyCaster() *types.Property { + t := types.Property(u.v) + return &t +} + +func (u *_property) PropertyCaster() *types.Property { + return &u.v +} diff --git a/typedapi/esdsl/queries.go b/typedapi/esdsl/queries.go new file mode 100644 index 0000000000..2cf9a2109f --- /dev/null +++ b/typedapi/esdsl/queries.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _queries struct { + v *types.Queries +} + +func NewQueries() *_queries { + + return &_queries{v: types.NewQueries()} + +} + +func (s *_queries) Cache(cache types.CacheQueriesVariant) *_queries { + + s.v.Cache = cache.CacheQueriesCaster() + + return s +} + +func (s *_queries) QueriesCaster() *types.Queries { + return s.v +} diff --git a/typedapi/esdsl/query.go b/typedapi/esdsl/query.go new file mode 100644 index 0000000000..7b42848655 --- /dev/null +++ b/typedapi/esdsl/query.go @@ -0,0 +1,671 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _query struct { + v *types.Query +} + +func NewQuery() *_query { + return &_query{v: types.NewQuery()} +} + +// AdditionalQueryProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) AdditionalQueryProperty(key string, value json.RawMessage) *_query { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalQueryProperty = tmp + return s +} + +// matches documents matching boolean combinations of other queries. +func (s *_query) Bool(bool types.BoolQueryVariant) *_query { + + s.v.Bool = bool.BoolQueryCaster() + + return s +} + +// Returns documents matching a `positive` query while reducing the relevance +// score of documents that also match a `negative` query. +func (s *_query) Boosting(boosting types.BoostingQueryVariant) *_query { + + s.v.Boosting = boosting.BoostingQueryCaster() + + return s +} + +// The `combined_fields` query supports searching multiple text fields as if +// their contents had been indexed into one combined field. +func (s *_query) CombinedFields(combinedfields types.CombinedFieldsQueryVariant) *_query { + + s.v.CombinedFields = combinedfields.CombinedFieldsQueryCaster() + + return s +} + +// Common is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Common(key string, value types.CommonTermsQueryVariant) *_query { + + tmp := make(map[string]types.CommonTermsQuery) + + tmp[key] = *value.CommonTermsQueryCaster() + + s.v.Common = tmp + return s +} + +// Wraps a filter query and returns every matching document with a relevance +// score equal to the `boost` parameter value. 
+func (s *_query) ConstantScore(constantscore types.ConstantScoreQueryVariant) *_query { + + s.v.ConstantScore = constantscore.ConstantScoreQueryCaster() + + return s +} + +// Returns documents matching one or more wrapped queries, called query clauses +// or clauses. +// If a returned document matches multiple query clauses, the `dis_max` query +// assigns the document the highest relevance score from any matching clause, +// plus a tie breaking increment for any additional matching subqueries. +func (s *_query) DisMax(dismax types.DisMaxQueryVariant) *_query { + + s.v.DisMax = dismax.DisMaxQueryCaster() + + return s +} + +// Boosts the relevance score of documents closer to a provided origin date or +// point. +// For example, you can use this query to give more weight to documents closer +// to a certain date or location. +func (s *_query) DistanceFeature(distancefeaturequery types.DistanceFeatureQueryVariant) *_query { + + s.v.DistanceFeature = *distancefeaturequery.DistanceFeatureQueryCaster() + + return s +} + +// Returns documents that contain an indexed value for a field. +func (s *_query) Exists(exists types.ExistsQueryVariant) *_query { + + s.v.Exists = exists.ExistsQueryCaster() + + return s +} + +// The `function_score` enables you to modify the score of documents that are +// retrieved by a query. +func (s *_query) FunctionScore(functionscore types.FunctionScoreQueryVariant) *_query { + + s.v.FunctionScore = functionscore.FunctionScoreQueryCaster() + + return s +} + +// Returns documents that contain terms similar to the search term, as measured +// by a Levenshtein edit distance. +// Fuzzy is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Fuzzy(key string, value types.FuzzyQueryVariant) *_query { + + tmp := make(map[string]types.FuzzyQuery) + + tmp[key] = *value.FuzzyQueryCaster() + + s.v.Fuzzy = tmp + return s +} + +// Matches geo_point and geo_shape values that intersect a bounding box. 
+func (s *_query) GeoBoundingBox(geoboundingbox types.GeoBoundingBoxQueryVariant) *_query { + + s.v.GeoBoundingBox = geoboundingbox.GeoBoundingBoxQueryCaster() + + return s +} + +// Matches `geo_point` and `geo_shape` values within a given distance of a +// geopoint. +func (s *_query) GeoDistance(geodistance types.GeoDistanceQueryVariant) *_query { + + s.v.GeoDistance = geodistance.GeoDistanceQueryCaster() + + return s +} + +// Matches `geo_point` and `geo_shape` values that intersect a grid cell from a +// GeoGrid aggregation. +// GeoGrid is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) GeoGrid(key string, value types.GeoGridQueryVariant) *_query { + + tmp := make(map[string]types.GeoGridQuery) + + tmp[key] = *value.GeoGridQueryCaster() + + s.v.GeoGrid = tmp + return s +} + +func (s *_query) GeoPolygon(geopolygon types.GeoPolygonQueryVariant) *_query { + + s.v.GeoPolygon = geopolygon.GeoPolygonQueryCaster() + + return s +} + +// Filter documents indexed using either the `geo_shape` or the `geo_point` +// type. +func (s *_query) GeoShape(geoshape types.GeoShapeQueryVariant) *_query { + + s.v.GeoShape = geoshape.GeoShapeQueryCaster() + + return s +} + +// Returns parent documents whose joined child documents match a provided query. +func (s *_query) HasChild(haschild types.HasChildQueryVariant) *_query { + + s.v.HasChild = haschild.HasChildQueryCaster() + + return s +} + +// Returns child documents whose joined parent document matches a provided +// query. +func (s *_query) HasParent(hasparent types.HasParentQueryVariant) *_query { + + s.v.HasParent = hasparent.HasParentQueryCaster() + + return s +} + +// Returns documents based on their IDs. +// This query uses document IDs stored in the `_id` field. +func (s *_query) Ids(ids types.IdsQueryVariant) *_query { + + s.v.Ids = ids.IdsQueryCaster() + + return s +} + +// Returns documents based on the order and proximity of matching terms. 
+// Interval is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Interval(key string, value types.IntervalsQueryVariant) *_query { + + tmp := make(map[string]types.IntervalsQuery) + + tmp[key] = *value.IntervalsQueryCaster() + + s.v.Intervals = tmp + return s +} + +// Finds the k nearest vectors to a query vector, as measured by a similarity +// metric. knn query finds nearest vectors through approximate search on indexed +// dense_vectors. +func (s *_query) Knn(knn types.KnnQueryVariant) *_query { + + s.v.Knn = knn.KnnQueryCaster() + + return s +} + +// Returns documents that match a provided text, number, date or boolean value. +// The provided text is analyzed before matching. +// Match is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Match(key string, value types.MatchQueryVariant) *_query { + + tmp := make(map[string]types.MatchQuery) + + tmp[key] = *value.MatchQueryCaster() + + s.v.Match = tmp + return s +} + +// Matches all documents, giving them all a `_score` of 1.0. +func (s *_query) MatchAll(matchall types.MatchAllQueryVariant) *_query { + + s.v.MatchAll = matchall.MatchAllQueryCaster() + + return s +} + +// Analyzes its input and constructs a `bool` query from the terms. +// Each term except the last is used in a `term` query. +// The last term is used in a prefix query. +// MatchBoolPrefix is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) MatchBoolPrefix(key string, value types.MatchBoolPrefixQueryVariant) *_query { + + tmp := make(map[string]types.MatchBoolPrefixQuery) + + tmp[key] = *value.MatchBoolPrefixQueryCaster() + + s.v.MatchBoolPrefix = tmp + return s +} + +// Matches no documents. +func (s *_query) MatchNone(matchnone types.MatchNoneQueryVariant) *_query { + + s.v.MatchNone = matchnone.MatchNoneQueryCaster() + + return s +} + +// Analyzes the text and creates a phrase query out of the analyzed text. 
+// MatchPhrase is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) MatchPhrase(key string, value types.MatchPhraseQueryVariant) *_query { + + tmp := make(map[string]types.MatchPhraseQuery) + + tmp[key] = *value.MatchPhraseQueryCaster() + + s.v.MatchPhrase = tmp + return s +} + +// Returns documents that contain the words of a provided text, in the same +// order as provided. +// The last term of the provided text is treated as a prefix, matching any words +// that begin with that term. +// MatchPhrasePrefix is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) MatchPhrasePrefix(key string, value types.MatchPhrasePrefixQueryVariant) *_query { + + tmp := make(map[string]types.MatchPhrasePrefixQuery) + + tmp[key] = *value.MatchPhrasePrefixQueryCaster() + + s.v.MatchPhrasePrefix = tmp + return s +} + +// Returns documents that are "like" a given set of documents. +func (s *_query) MoreLikeThis(morelikethis types.MoreLikeThisQueryVariant) *_query { + + s.v.MoreLikeThis = morelikethis.MoreLikeThisQueryCaster() + + return s +} + +// Enables you to search for a provided text, number, date or boolean value +// across multiple fields. +// The provided text is analyzed before matching. +func (s *_query) MultiMatch(multimatch types.MultiMatchQueryVariant) *_query { + + s.v.MultiMatch = multimatch.MultiMatchQueryCaster() + + return s +} + +// Wraps another query to search nested fields. +// If an object matches the search, the nested query returns the root parent +// document. +func (s *_query) Nested(nested types.NestedQueryVariant) *_query { + + s.v.Nested = nested.NestedQueryCaster() + + return s +} + +// Returns child documents joined to a specific parent document. +func (s *_query) ParentId(parentid types.ParentIdQueryVariant) *_query { + + s.v.ParentId = parentid.ParentIdQueryCaster() + + return s +} + +// Matches queries stored in an index. 
+func (s *_query) Percolate(percolate types.PercolateQueryVariant) *_query { + + s.v.Percolate = percolate.PercolateQueryCaster() + + return s +} + +// Promotes selected documents to rank higher than those matching a given query. +func (s *_query) Pinned(pinned types.PinnedQueryVariant) *_query { + + s.v.Pinned = pinned.PinnedQueryCaster() + + return s +} + +// Returns documents that contain a specific prefix in a provided field. +// Prefix is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Prefix(key string, value types.PrefixQueryVariant) *_query { + + tmp := make(map[string]types.PrefixQuery) + + tmp[key] = *value.PrefixQueryCaster() + + s.v.Prefix = tmp + return s +} + +// Returns documents based on a provided query string, using a parser with a +// strict syntax. +func (s *_query) QueryString(querystring types.QueryStringQueryVariant) *_query { + + s.v.QueryString = querystring.QueryStringQueryCaster() + + return s +} + +// Returns documents that contain terms within a provided range. +// Range is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Range(key string, value types.RangeQueryVariant) *_query { + + tmp := make(map[string]types.RangeQuery) + + tmp[key] = *value.RangeQueryCaster() + + s.v.Range = tmp + return s +} + +// Boosts the relevance score of documents based on the numeric value of a +// `rank_feature` or `rank_features` field. +func (s *_query) RankFeature(rankfeature types.RankFeatureQueryVariant) *_query { + + s.v.RankFeature = rankfeature.RankFeatureQueryCaster() + + return s +} + +// Returns documents that contain terms matching a regular expression. +// Regexp is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_query) Regexp(key string, value types.RegexpQueryVariant) *_query { + + tmp := make(map[string]types.RegexpQuery) + + tmp[key] = *value.RegexpQueryCaster() + + s.v.Regexp = tmp + return s +} + +func (s *_query) Rule(rule types.RuleQueryVariant) *_query { + + s.v.Rule = rule.RuleQueryCaster() + + return s +} + +// Filters documents based on a provided script. +// The script query is typically used in a filter context. +func (s *_query) Script(script types.ScriptQueryVariant) *_query { + + s.v.Script = script.ScriptQueryCaster() + + return s +} + +// Uses a script to provide a custom score for returned documents. +func (s *_query) ScriptScore(scriptscore types.ScriptScoreQueryVariant) *_query { + + s.v.ScriptScore = scriptscore.ScriptScoreQueryCaster() + + return s +} + +// A semantic query to semantic_text field types +func (s *_query) Semantic(semantic types.SemanticQueryVariant) *_query { + + s.v.Semantic = semantic.SemanticQueryCaster() + + return s +} + +// Queries documents that contain fields indexed using the `shape` type. +func (s *_query) Shape(shape types.ShapeQueryVariant) *_query { + + s.v.Shape = shape.ShapeQueryCaster() + + return s +} + +// Returns documents based on a provided query string, using a parser with a +// limited but fault-tolerant syntax. +func (s *_query) SimpleQueryString(simplequerystring types.SimpleQueryStringQueryVariant) *_query { + + s.v.SimpleQueryString = simplequerystring.SimpleQueryStringQueryCaster() + + return s +} + +// Returns matches which enclose another span query. +func (s *_query) SpanContaining(spancontaining types.SpanContainingQueryVariant) *_query { + + s.v.SpanContaining = spancontaining.SpanContainingQueryCaster() + + return s +} + +// Wrapper to allow span queries to participate in composite single-field span +// queries by _lying_ about their search field. 
+func (s *_query) SpanFieldMasking(spanfieldmasking types.SpanFieldMaskingQueryVariant) *_query { + + s.v.SpanFieldMasking = spanfieldmasking.SpanFieldMaskingQueryCaster() + + return s +} + +// Matches spans near the beginning of a field. +func (s *_query) SpanFirst(spanfirst types.SpanFirstQueryVariant) *_query { + + s.v.SpanFirst = spanfirst.SpanFirstQueryCaster() + + return s +} + +// Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, +// `range`, or `regexp` query) as a `span` query, so it can be nested. +func (s *_query) SpanMulti(spanmulti types.SpanMultiTermQueryVariant) *_query { + + s.v.SpanMulti = spanmulti.SpanMultiTermQueryCaster() + + return s +} + +// Matches spans which are near one another. +// You can specify `slop`, the maximum number of intervening unmatched +// positions, as well as whether matches are required to be in-order. +func (s *_query) SpanNear(spannear types.SpanNearQueryVariant) *_query { + + s.v.SpanNear = spannear.SpanNearQueryCaster() + + return s +} + +// Removes matches which overlap with another span query or which are within x +// tokens before (controlled by the parameter `pre`) or y tokens after +// (controlled by the parameter `post`) another span query. +func (s *_query) SpanNot(spannot types.SpanNotQueryVariant) *_query { + + s.v.SpanNot = spannot.SpanNotQueryCaster() + + return s +} + +// Matches the union of its span clauses. +func (s *_query) SpanOr(spanor types.SpanOrQueryVariant) *_query { + + s.v.SpanOr = spanor.SpanOrQueryCaster() + + return s +} + +// Matches spans containing a term. +// SpanTerm is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) SpanTerm(key string, value types.SpanTermQueryVariant) *_query { + + tmp := make(map[string]types.SpanTermQuery) + + tmp[key] = *value.SpanTermQueryCaster() + + s.v.SpanTerm = tmp + return s +} + +// Returns matches which are enclosed inside another span query. 
+func (s *_query) SpanWithin(spanwithin types.SpanWithinQueryVariant) *_query { + + s.v.SpanWithin = spanwithin.SpanWithinQueryCaster() + + return s +} + +// Using input query vectors or a natural language processing model to convert a +// query into a list of token-weight pairs, queries against a sparse vector +// field. +func (s *_query) SparseVector(sparsevector types.SparseVectorQueryVariant) *_query { + + s.v.SparseVector = sparsevector.SparseVectorQueryCaster() + + return s +} + +// Returns documents that contain an exact term in a provided field. +// To return a document, the query term must exactly match the queried field's +// value, including whitespace and capitalization. +// Term is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Term(key string, value types.TermQueryVariant) *_query { + + tmp := make(map[string]types.TermQuery) + + tmp[key] = *value.TermQueryCaster() + + s.v.Term = tmp + return s +} + +// Returns documents that contain one or more exact terms in a provided field. +// To return a document, one or more terms must exactly match a field value, +// including whitespace and capitalization. +func (s *_query) Terms(terms types.TermsQueryVariant) *_query { + + s.v.Terms = terms.TermsQueryCaster() + + return s +} + +// Returns documents that contain a minimum number of exact terms in a provided +// field. +// To return a document, a required number of terms must exactly match the field +// values, including whitespace and capitalization. +// TermsSet is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_query) TermsSet(key string, value types.TermsSetQueryVariant) *_query { + + tmp := make(map[string]types.TermsSetQuery) + + tmp[key] = *value.TermsSetQueryCaster() + + s.v.TermsSet = tmp + return s +} + +// Uses a natural language processing model to convert the query text into a +// list of token-weight pairs which are then used in a query against a sparse +// vector or rank features field. +// TextExpansion is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) TextExpansion(key string, value types.TextExpansionQueryVariant) *_query { + + tmp := make(map[string]types.TextExpansionQuery) + + tmp[key] = *value.TextExpansionQueryCaster() + + s.v.TextExpansion = tmp + return s +} + +func (s *_query) Type(type_ types.TypeQueryVariant) *_query { + + s.v.Type = type_.TypeQueryCaster() + + return s +} + +// Supports returning text_expansion query results by sending in precomputed +// tokens with the query. +// WeightedToken is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) WeightedToken(key string, value types.WeightedTokensQueryVariant) *_query { + + tmp := make(map[string]types.WeightedTokensQuery) + + tmp[key] = *value.WeightedTokensQueryCaster() + + s.v.WeightedTokens = tmp + return s +} + +// Returns documents that contain terms matching a wildcard pattern. +// Wildcard is a single key dictionnary. +// It will replace the current value on each call. +func (s *_query) Wildcard(key string, value types.WildcardQueryVariant) *_query { + + tmp := make(map[string]types.WildcardQuery) + + tmp[key] = *value.WildcardQueryCaster() + + s.v.Wildcard = tmp + return s +} + +// A query that accepts any other query as base64 encoded string. 
+func (s *_query) Wrapper(wrapper types.WrapperQueryVariant) *_query { + + s.v.Wrapper = wrapper.WrapperQueryCaster() + + return s +} + +func (s *_query) QueryCaster() *types.Query { + return s.v +} diff --git a/typedapi/esdsl/queryrule.go b/typedapi/esdsl/queryrule.go new file mode 100644 index 0000000000..40c141442b --- /dev/null +++ b/typedapi/esdsl/queryrule.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype" +) + +type _queryRule struct { + v *types.QueryRule +} + +func NewQueryRule(actions types.QueryRuleActionsVariant, type_ queryruletype.QueryRuleType) *_queryRule { + + tmp := &_queryRule{v: types.NewQueryRule()} + + tmp.Actions(actions) + + tmp.Type(type_) + + return tmp + +} + +// The actions to take when the rule is matched. +// The format of this action depends on the rule type. 
+func (s *_queryRule) Actions(actions types.QueryRuleActionsVariant) *_queryRule { + + s.v.Actions = *actions.QueryRuleActionsCaster() + + return s +} + +// The criteria that must be met for the rule to be applied. +// If multiple criteria are specified for a rule, all criteria must be met for +// the rule to be applied. +func (s *_queryRule) Criteria(criteria ...types.QueryRuleCriteriaVariant) *_queryRule { + + s.v.Criteria = make([]types.QueryRuleCriteria, len(criteria)) + for i, v := range criteria { + s.v.Criteria[i] = *v.QueryRuleCriteriaCaster() + } + + return s +} + +func (s *_queryRule) Priority(priority int) *_queryRule { + + s.v.Priority = &priority + + return s +} + +// A unique identifier for the rule. +func (s *_queryRule) RuleId(id string) *_queryRule { + + s.v.RuleId = id + + return s +} + +// The type of rule. +// `pinned` will identify and pin specific documents to the top of search +// results. +// `exclude` will exclude specific documents from search results. +func (s *_queryRule) Type(type_ queryruletype.QueryRuleType) *_queryRule { + + s.v.Type = type_ + return s +} + +func (s *_queryRule) QueryRuleCaster() *types.QueryRule { + return s.v +} diff --git a/typedapi/esdsl/queryruleactions.go b/typedapi/esdsl/queryruleactions.go new file mode 100644 index 0000000000..ea0dab43de --- /dev/null +++ b/typedapi/esdsl/queryruleactions.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _queryRuleActions struct { + v *types.QueryRuleActions +} + +func NewQueryRuleActions() *_queryRuleActions { + + return &_queryRuleActions{v: types.NewQueryRuleActions()} + +} + +// The documents to apply the rule to. +// Only one of `ids` or `docs` may be specified and at least one must be +// specified. +// There is a maximum value of 100 documents in a rule. +// You can specify the following attributes for each document: +// +// * `_index`: The index of the document to pin. +// * `_id`: The unique document ID. +func (s *_queryRuleActions) Docs(docs ...types.PinnedDocVariant) *_queryRuleActions { + + for _, v := range docs { + + s.v.Docs = append(s.v.Docs, *v.PinnedDocCaster()) + + } + return s +} + +// The unique document IDs of the documents to apply the rule to. +// Only one of `ids` or `docs` may be specified and at least one must be +// specified. 
+func (s *_queryRuleActions) Ids(ids ...string) *_queryRuleActions { + + for _, v := range ids { + + s.v.Ids = append(s.v.Ids, v) + + } + return s +} + +func (s *_queryRuleActions) QueryRuleActionsCaster() *types.QueryRuleActions { + return s.v +} diff --git a/typedapi/esdsl/queryrulecriteria.go b/typedapi/esdsl/queryrulecriteria.go new file mode 100644 index 0000000000..78f371ce4f --- /dev/null +++ b/typedapi/esdsl/queryrulecriteria.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype" +) + +type _queryRuleCriteria struct { + v *types.QueryRuleCriteria +} + +func NewQueryRuleCriteria(type_ queryrulecriteriatype.QueryRuleCriteriaType) *_queryRuleCriteria { + + tmp := &_queryRuleCriteria{v: types.NewQueryRuleCriteria()} + + tmp.Type(type_) + + return tmp + +} + +// The metadata field to match against. 
+// This metadata will be used to match against `match_criteria` sent in the +// rule. +// It is required for all criteria types except `always`. +func (s *_queryRuleCriteria) Metadata(metadata string) *_queryRuleCriteria { + + s.v.Metadata = &metadata + + return s +} + +// The type of criteria. The following criteria types are supported: +// +// * `always`: Matches all queries, regardless of input. +// * `contains`: Matches that contain this value anywhere in the field meet the +// criteria defined by the rule. Only applicable for string values. +// * `exact`: Only exact matches meet the criteria defined by the rule. +// Applicable for string or numerical values. +// * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit +// Distance meet the criteria defined by the rule. Only applicable for string +// values. +// * `gt`: Matches with a value greater than this value meet the criteria +// defined by the rule. Only applicable for numerical values. +// * `gte`: Matches with a value greater than or equal to this value meet the +// criteria defined by the rule. Only applicable for numerical values. +// * `lt`: Matches with a value less than this value meet the criteria defined +// by the rule. Only applicable for numerical values. +// * `lte`: Matches with a value less than or equal to this value meet the +// criteria defined by the rule. Only applicable for numerical values. +// * `prefix`: Matches that start with this value meet the criteria defined by +// the rule. Only applicable for string values. +// * `suffix`: Matches that end with this value meet the criteria defined by the +// rule. Only applicable for string values. +func (s *_queryRuleCriteria) Type(type_ queryrulecriteriatype.QueryRuleCriteriaType) *_queryRuleCriteria { + + s.v.Type = type_ + return s +} + +// The values to match against the `metadata` field. +// Only one value must match for the criteria to be met. +// It is required for all criteria types except `always`. 
+func (s *_queryRuleCriteria) Values(values ...json.RawMessage) *_queryRuleCriteria { + + for _, v := range values { + + s.v.Values = append(s.v.Values, v) + + } + return s +} + +func (s *_queryRuleCriteria) QueryRuleCriteriaCaster() *types.QueryRuleCriteria { + return s.v +} diff --git a/typedapi/esdsl/querystringquery.go b/typedapi/esdsl/querystringquery.go new file mode 100644 index 0000000000..0128d7e04e --- /dev/null +++ b/typedapi/esdsl/querystringquery.go @@ -0,0 +1,288 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype" +) + +type _queryStringQuery struct { + v *types.QueryStringQuery +} + +// Returns documents based on a provided query string, using a parser with a +// strict syntax. 
+func NewQueryStringQuery(query string) *_queryStringQuery { + + tmp := &_queryStringQuery{v: types.NewQueryStringQuery()} + + tmp.Query(query) + + return tmp + +} + +// If `true`, the wildcard characters `*` and `?` are allowed as the first +// character of the query string. +func (s *_queryStringQuery) AllowLeadingWildcard(allowleadingwildcard bool) *_queryStringQuery { + + s.v.AllowLeadingWildcard = &allowleadingwildcard + + return s +} + +// If `true`, the query attempts to analyze wildcard terms in the query string. +func (s *_queryStringQuery) AnalyzeWildcard(analyzewildcard bool) *_queryStringQuery { + + s.v.AnalyzeWildcard = &analyzewildcard + + return s +} + +// Analyzer used to convert text in the query string into tokens. +func (s *_queryStringQuery) Analyzer(analyzer string) *_queryStringQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// If `true`, match phrase queries are automatically created for multi-term +// synonyms. +func (s *_queryStringQuery) AutoGenerateSynonymsPhraseQuery(autogeneratesynonymsphrasequery bool) *_queryStringQuery { + + s.v.AutoGenerateSynonymsPhraseQuery = &autogeneratesynonymsphrasequery + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_queryStringQuery) Boost(boost float32) *_queryStringQuery { + + s.v.Boost = &boost + + return s +} + +// Default field to search if no field is provided in the query string. +// Supports wildcards (`*`). +// Defaults to the `index.query.default_field` index setting, which has a +// default value of `*`. +func (s *_queryStringQuery) DefaultField(field string) *_queryStringQuery { + + s.v.DefaultField = &field + + return s +} + +// Default boolean logic used to interpret text in the query string if no +// operators are specified. 
+func (s *_queryStringQuery) DefaultOperator(defaultoperator operator.Operator) *_queryStringQuery { + + s.v.DefaultOperator = &defaultoperator + return s +} + +// If `true`, enable position increments in queries constructed from a +// `query_string` search. +func (s *_queryStringQuery) EnablePositionIncrements(enablepositionincrements bool) *_queryStringQuery { + + s.v.EnablePositionIncrements = &enablepositionincrements + + return s +} + +func (s *_queryStringQuery) Escape(escape bool) *_queryStringQuery { + + s.v.Escape = &escape + + return s +} + +// Array of fields to search. Supports wildcards (`*`). +func (s *_queryStringQuery) Fields(fields ...string) *_queryStringQuery { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, v) + + } + return s +} + +// Maximum edit distance allowed for fuzzy matching. +func (s *_queryStringQuery) Fuzziness(fuzziness types.FuzzinessVariant) *_queryStringQuery { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Maximum number of terms to which the query expands for fuzzy matching. +func (s *_queryStringQuery) FuzzyMaxExpansions(fuzzymaxexpansions int) *_queryStringQuery { + + s.v.FuzzyMaxExpansions = &fuzzymaxexpansions + + return s +} + +// Number of beginning characters left unchanged for fuzzy matching. +func (s *_queryStringQuery) FuzzyPrefixLength(fuzzyprefixlength int) *_queryStringQuery { + + s.v.FuzzyPrefixLength = &fuzzyprefixlength + + return s +} + +// Method used to rewrite the query. +func (s *_queryStringQuery) FuzzyRewrite(multitermqueryrewrite string) *_queryStringQuery { + + s.v.FuzzyRewrite = &multitermqueryrewrite + + return s +} + +// If `true`, edits for fuzzy matching include transpositions of two adjacent +// characters (for example, `ab` to `ba`). 
+func (s *_queryStringQuery) FuzzyTranspositions(fuzzytranspositions bool) *_queryStringQuery {
+
+	s.v.FuzzyTranspositions = &fuzzytranspositions
+
+	return s
+}
+
+// If `true`, format-based errors, such as providing a text value for a numeric
+// field, are ignored.
+func (s *_queryStringQuery) Lenient(lenient bool) *_queryStringQuery {
+
+	s.v.Lenient = &lenient
+
+	return s
+}
+
+// Maximum number of automaton states required for the query.
+func (s *_queryStringQuery) MaxDeterminizedStates(maxdeterminizedstates int) *_queryStringQuery {
+
+	s.v.MaxDeterminizedStates = &maxdeterminizedstates
+
+	return s
+}
+
+// Minimum number of clauses that must match for a document to be returned.
+func (s *_queryStringQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_queryStringQuery {
+
+	s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster()
+
+	return s
+}
+
+// Maximum number of positions allowed between matching tokens for phrases.
+func (s *_queryStringQuery) PhraseSlop(phraseslop types.Float64) *_queryStringQuery {
+
+	s.v.PhraseSlop = &phraseslop
+
+	return s
+}
+
+// Query string you wish to parse and use for search.
+func (s *_queryStringQuery) Query(query string) *_queryStringQuery {
+
+	s.v.Query = query
+
+	return s
+}
+
+func (s *_queryStringQuery) QueryName_(queryname_ string) *_queryStringQuery {
+
+	s.v.QueryName_ = &queryname_
+
+	return s
+}
+
+// Analyzer used to convert quoted text in the query string into tokens.
+// For quoted text, this parameter overrides the analyzer specified in the
+// `analyzer` parameter.
+func (s *_queryStringQuery) QuoteAnalyzer(quoteanalyzer string) *_queryStringQuery {
+
+	s.v.QuoteAnalyzer = &quoteanalyzer
+
+	return s
+}
+
+// Suffix appended to quoted text in the query string.
+// You can use this suffix to use a different analysis method for exact matches.
+func (s *_queryStringQuery) QuoteFieldSuffix(quotefieldsuffix string) *_queryStringQuery {
+
+	s.v.QuoteFieldSuffix = &quotefieldsuffix
+
+	return s
+}
+
+// Method used to rewrite the query.
+func (s *_queryStringQuery) Rewrite(multitermqueryrewrite string) *_queryStringQuery {
+
+	s.v.Rewrite = &multitermqueryrewrite
+
+	return s
+}
+
+// How to combine the queries generated from the individual search terms in the
+// resulting `dis_max` query.
+func (s *_queryStringQuery) TieBreaker(tiebreaker types.Float64) *_queryStringQuery {
+
+	s.v.TieBreaker = &tiebreaker
+
+	return s
+}
+
+// Coordinated Universal Time (UTC) offset or IANA time zone used to convert
+// date values in the query string to UTC.
+func (s *_queryStringQuery) TimeZone(timezone string) *_queryStringQuery {
+
+	s.v.TimeZone = &timezone
+
+	return s
+}
+
+// Determines how the query matches and scores documents.
+func (s *_queryStringQuery) Type(type_ textquerytype.TextQueryType) *_queryStringQuery {
+
+	s.v.Type = &type_
+	return s
+}
+
+func (s *_queryStringQuery) QueryCaster() *types.Query {
+	container := types.NewQuery()
+
+	container.QueryString = s.v
+
+	return container
+}
+
+func (s *_queryStringQuery) QueryStringQueryCaster() *types.QueryStringQuery {
+	return s.v
+}
diff --git a/typedapi/esdsl/queryvectorbuilder.go b/typedapi/esdsl/queryvectorbuilder.go
new file mode 100644
index 0000000000..a3013eb9a3
--- /dev/null
+++ b/typedapi/esdsl/queryvectorbuilder.go
@@ -0,0 +1,58 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _queryVectorBuilder struct { + v *types.QueryVectorBuilder +} + +func NewQueryVectorBuilder() *_queryVectorBuilder { + return &_queryVectorBuilder{v: types.NewQueryVectorBuilder()} +} + +// AdditionalQueryVectorBuilderProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_queryVectorBuilder) AdditionalQueryVectorBuilderProperty(key string, value json.RawMessage) *_queryVectorBuilder { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalQueryVectorBuilderProperty = tmp + return s +} + +func (s *_queryVectorBuilder) TextEmbedding(textembedding types.TextEmbeddingVariant) *_queryVectorBuilder { + + s.v.TextEmbedding = textembedding.TextEmbeddingCaster() + + return s +} + +func (s *_queryVectorBuilder) QueryVectorBuilderCaster() *types.QueryVectorBuilder { + return s.v +} diff --git a/typedapi/esdsl/questionansweringinferenceoptions.go b/typedapi/esdsl/questionansweringinferenceoptions.go new file mode 100644 index 0000000000..eadb4fce92 --- /dev/null +++ b/typedapi/esdsl/questionansweringinferenceoptions.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _questionAnsweringInferenceOptions struct { + v *types.QuestionAnsweringInferenceOptions +} + +// Question answering configuration for inference. +func NewQuestionAnsweringInferenceOptions() *_questionAnsweringInferenceOptions { + + return &_questionAnsweringInferenceOptions{v: types.NewQuestionAnsweringInferenceOptions()} + +} + +// The maximum answer length to consider +func (s *_questionAnsweringInferenceOptions) MaxAnswerLength(maxanswerlength int) *_questionAnsweringInferenceOptions { + + s.v.MaxAnswerLength = &maxanswerlength + + return s +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_questionAnsweringInferenceOptions) NumTopClasses(numtopclasses int) *_questionAnsweringInferenceOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_questionAnsweringInferenceOptions) ResultsField(resultsfield string) *_questionAnsweringInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_questionAnsweringInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_questionAnsweringInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_questionAnsweringInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.QuestionAnswering = s.v + + return container +} + +func (s *_questionAnsweringInferenceOptions) QuestionAnsweringInferenceOptionsCaster() *types.QuestionAnsweringInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/questionansweringinferenceupdateoptions.go b/typedapi/esdsl/questionansweringinferenceupdateoptions.go new file mode 100644 index 0000000000..9180e2533e --- /dev/null +++ b/typedapi/esdsl/questionansweringinferenceupdateoptions.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _questionAnsweringInferenceUpdateOptions struct { + v *types.QuestionAnsweringInferenceUpdateOptions +} + +// Question answering configuration for inference +func NewQuestionAnsweringInferenceUpdateOptions(question string) *_questionAnsweringInferenceUpdateOptions { + + tmp := &_questionAnsweringInferenceUpdateOptions{v: types.NewQuestionAnsweringInferenceUpdateOptions()} + + tmp.Question(question) + + return tmp + +} + +// The maximum answer length to consider for extraction +func (s *_questionAnsweringInferenceUpdateOptions) MaxAnswerLength(maxanswerlength int) *_questionAnsweringInferenceUpdateOptions { + + s.v.MaxAnswerLength = &maxanswerlength + + return s +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_questionAnsweringInferenceUpdateOptions) NumTopClasses(numtopclasses int) *_questionAnsweringInferenceUpdateOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The question to answer given the inference context +func (s *_questionAnsweringInferenceUpdateOptions) Question(question string) *_questionAnsweringInferenceUpdateOptions { + + s.v.Question = question + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_questionAnsweringInferenceUpdateOptions) ResultsField(resultsfield string) *_questionAnsweringInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_questionAnsweringInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_questionAnsweringInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_questionAnsweringInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.QuestionAnswering = s.v + + return container +} + +func (s *_questionAnsweringInferenceUpdateOptions) QuestionAnsweringInferenceUpdateOptionsCaster() *types.QuestionAnsweringInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/randomsampleraggregation.go b/typedapi/esdsl/randomsampleraggregation.go new file mode 100644 index 0000000000..36bb5d0f89 --- /dev/null +++ b/typedapi/esdsl/randomsampleraggregation.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _randomSamplerAggregation struct { + v *types.RandomSamplerAggregation +} + +// A single bucket aggregation that randomly includes documents in the +// aggregated results. +// Sampling provides significant speed improvement at the cost of accuracy. +func NewRandomSamplerAggregation(probability types.Float64) *_randomSamplerAggregation { + + tmp := &_randomSamplerAggregation{v: types.NewRandomSamplerAggregation()} + + tmp.Probability(probability) + + return tmp + +} + +// The probability that a document will be included in the aggregated data. +// Must be greater than 0, less than 0.5, or exactly 1. +// The lower the probability, the fewer documents are matched. +func (s *_randomSamplerAggregation) Probability(probability types.Float64) *_randomSamplerAggregation { + + s.v.Probability = probability + + return s +} + +// The seed to generate the random sampling of documents. +// When a seed is provided, the random subset of documents is the same between +// calls. +func (s *_randomSamplerAggregation) Seed(seed int) *_randomSamplerAggregation { + + s.v.Seed = &seed + + return s +} + +// When combined with seed, setting shard_seed ensures 100% consistent sampling +// over shards where data is exactly the same. 
+func (s *_randomSamplerAggregation) ShardSeed(shardseed int) *_randomSamplerAggregation { + + s.v.ShardSeed = &shardseed + + return s +} + +func (s *_randomSamplerAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.RandomSampler = s.v + + return container +} + +func (s *_randomSamplerAggregation) RandomSamplerAggregationCaster() *types.RandomSamplerAggregation { + return s.v +} diff --git a/typedapi/esdsl/randomscorefunction.go b/typedapi/esdsl/randomscorefunction.go new file mode 100644 index 0000000000..44ad1151d2 --- /dev/null +++ b/typedapi/esdsl/randomscorefunction.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _randomScoreFunction struct { + v *types.RandomScoreFunction +} + +// Generates scores that are uniformly distributed from 0 up to but not +// including 1. +// In case you want scores to be reproducible, it is possible to provide a +// `seed` and `field`. 
+func NewRandomScoreFunction() *_randomScoreFunction { + + return &_randomScoreFunction{v: types.NewRandomScoreFunction()} + +} + +func (s *_randomScoreFunction) Field(field string) *_randomScoreFunction { + + s.v.Field = &field + + return s +} + +func (s *_randomScoreFunction) Seed(seed string) *_randomScoreFunction { + + s.v.Seed = seed + + return s +} + +func (s *_randomScoreFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.RandomScore = s.v + + return container +} + +func (s *_randomScoreFunction) RandomScoreFunctionCaster() *types.RandomScoreFunction { + return s.v +} diff --git a/typedapi/esdsl/rangeaggregation.go b/typedapi/esdsl/rangeaggregation.go new file mode 100644 index 0000000000..8004828881 --- /dev/null +++ b/typedapi/esdsl/rangeaggregation.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rangeAggregation struct { + v *types.RangeAggregation +} + +// A multi-bucket value source based aggregation that enables the user to define +// a set of ranges - each representing a bucket. +func NewRangeAggregation() *_rangeAggregation { + + return &_rangeAggregation{v: types.NewRangeAggregation()} + +} + +// The date field whose values are use to build ranges. +func (s *_rangeAggregation) Field(field string) *_rangeAggregation { + + s.v.Field = &field + + return s +} + +func (s *_rangeAggregation) Format(format string) *_rangeAggregation { + + s.v.Format = &format + + return s +} + +// Set to `true` to associate a unique string key with each bucket and return +// the ranges as a hash rather than an array. +func (s *_rangeAggregation) Keyed(keyed bool) *_rangeAggregation { + + s.v.Keyed = &keyed + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_rangeAggregation) Missing(missing int) *_rangeAggregation { + + s.v.Missing = &missing + + return s +} + +// An array of ranges used to bucket documents. 
+func (s *_rangeAggregation) Ranges(ranges ...types.AggregationRangeVariant) *_rangeAggregation { + + for _, v := range ranges { + + s.v.Ranges = append(s.v.Ranges, *v.AggregationRangeCaster()) + + } + return s +} + +func (s *_rangeAggregation) Script(script types.ScriptVariant) *_rangeAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_rangeAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Range = s.v + + return container +} + +func (s *_rangeAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Range = s.v + + return container +} + +func (s *_rangeAggregation) RangeAggregationCaster() *types.RangeAggregation { + return s.v +} diff --git a/typedapi/esdsl/rangequery.go b/typedapi/esdsl/rangequery.go new file mode 100644 index 0000000000..16c0deeb2b --- /dev/null +++ b/typedapi/esdsl/rangequery.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _rangeQuery struct { + v types.RangeQuery +} + +func NewRangeQuery() *_rangeQuery { + return &_rangeQuery{v: nil} +} + +func (u *_rangeQuery) UntypedRangeQuery(untypedrangequery types.UntypedRangeQueryVariant) *_rangeQuery { + + u.v = &untypedrangequery + + return u +} + +// Interface implementation for UntypedRangeQuery in RangeQuery union +func (u *_untypedRangeQuery) RangeQueryCaster() *types.RangeQuery { + t := types.RangeQuery(u.v) + return &t +} + +func (u *_rangeQuery) DateRangeQuery(daterangequery types.DateRangeQueryVariant) *_rangeQuery { + + u.v = &daterangequery + + return u +} + +// Interface implementation for DateRangeQuery in RangeQuery union +func (u *_dateRangeQuery) RangeQueryCaster() *types.RangeQuery { + t := types.RangeQuery(u.v) + return &t +} + +func (u *_rangeQuery) NumberRangeQuery(numberrangequery types.NumberRangeQueryVariant) *_rangeQuery { + + u.v = &numberrangequery + + return u +} + +// Interface implementation for NumberRangeQuery in RangeQuery union +func (u *_numberRangeQuery) RangeQueryCaster() *types.RangeQuery { + t := types.RangeQuery(u.v) + return &t +} + +func (u *_rangeQuery) TermRangeQuery(termrangequery types.TermRangeQueryVariant) *_rangeQuery { + + u.v = &termrangequery + + return u +} + +// Interface implementation for TermRangeQuery in RangeQuery union +func (u *_termRangeQuery) RangeQueryCaster() *types.RangeQuery { + t := types.RangeQuery(u.v) + return &t +} + +func (u *_rangeQuery) RangeQueryCaster() *types.RangeQuery { + return &u.v +} diff --git a/typedapi/esdsl/rankcontainer.go b/typedapi/esdsl/rankcontainer.go new file mode 100644 index 0000000000..3c06c9f545 --- /dev/null +++ b/typedapi/esdsl/rankcontainer.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch 
B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _rankContainer struct { + v *types.RankContainer +} + +func NewRankContainer() *_rankContainer { + return &_rankContainer{v: types.NewRankContainer()} +} + +// AdditionalRankContainerProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_rankContainer) AdditionalRankContainerProperty(key string, value json.RawMessage) *_rankContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRankContainerProperty = tmp + return s +} + +// The reciprocal rank fusion parameters +func (s *_rankContainer) Rrf(rrf types.RrfRankVariant) *_rankContainer { + + s.v.Rrf = rrf.RrfRankCaster() + + return s +} + +func (s *_rankContainer) RankContainerCaster() *types.RankContainer { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetric.go b/typedapi/esdsl/rankevalmetric.go new file mode 100644 index 0000000000..3731f031a9 --- /dev/null +++ b/typedapi/esdsl/rankevalmetric.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetric struct { + v *types.RankEvalMetric +} + +func NewRankEvalMetric() *_rankEvalMetric { + + return &_rankEvalMetric{v: types.NewRankEvalMetric()} + +} + +func (s *_rankEvalMetric) Dcg(dcg types.RankEvalMetricDiscountedCumulativeGainVariant) *_rankEvalMetric { + + s.v.Dcg = dcg.RankEvalMetricDiscountedCumulativeGainCaster() + + return s +} + +func (s *_rankEvalMetric) ExpectedReciprocalRank(expectedreciprocalrank types.RankEvalMetricExpectedReciprocalRankVariant) *_rankEvalMetric { + + s.v.ExpectedReciprocalRank = expectedreciprocalrank.RankEvalMetricExpectedReciprocalRankCaster() + + return s +} + +func (s *_rankEvalMetric) MeanReciprocalRank(meanreciprocalrank types.RankEvalMetricMeanReciprocalRankVariant) *_rankEvalMetric { + + s.v.MeanReciprocalRank = meanreciprocalrank.RankEvalMetricMeanReciprocalRankCaster() + + return s +} + +func (s *_rankEvalMetric) Precision(precision types.RankEvalMetricPrecisionVariant) *_rankEvalMetric { + + s.v.Precision = precision.RankEvalMetricPrecisionCaster() + + return s +} + +func (s *_rankEvalMetric) Recall(recall types.RankEvalMetricRecallVariant) *_rankEvalMetric { + + s.v.Recall = recall.RankEvalMetricRecallCaster() + + return s +} + +func (s *_rankEvalMetric) RankEvalMetricCaster() *types.RankEvalMetric { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetricdiscountedcumulativegain.go b/typedapi/esdsl/rankevalmetricdiscountedcumulativegain.go new file mode 100644 index 0000000000..80cc94000f --- /dev/null +++ b/typedapi/esdsl/rankevalmetricdiscountedcumulativegain.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetricDiscountedCumulativeGain struct { + v *types.RankEvalMetricDiscountedCumulativeGain +} + +func NewRankEvalMetricDiscountedCumulativeGain() *_rankEvalMetricDiscountedCumulativeGain { + + return &_rankEvalMetricDiscountedCumulativeGain{v: types.NewRankEvalMetricDiscountedCumulativeGain()} + +} + +// Sets the maximum number of documents retrieved per query. This value will act +// in place of the usual size parameter in the query. +func (s *_rankEvalMetricDiscountedCumulativeGain) K(k int) *_rankEvalMetricDiscountedCumulativeGain { + + s.v.K = &k + + return s +} + +// If set to true, this metric will calculate the Normalized DCG. 
+func (s *_rankEvalMetricDiscountedCumulativeGain) Normalize(normalize bool) *_rankEvalMetricDiscountedCumulativeGain { + + s.v.Normalize = &normalize + + return s +} + +func (s *_rankEvalMetricDiscountedCumulativeGain) RankEvalMetricDiscountedCumulativeGainCaster() *types.RankEvalMetricDiscountedCumulativeGain { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetricexpectedreciprocalrank.go b/typedapi/esdsl/rankevalmetricexpectedreciprocalrank.go new file mode 100644 index 0000000000..217b7110de --- /dev/null +++ b/typedapi/esdsl/rankevalmetricexpectedreciprocalrank.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetricExpectedReciprocalRank struct { + v *types.RankEvalMetricExpectedReciprocalRank +} + +func NewRankEvalMetricExpectedReciprocalRank(maximumrelevance int) *_rankEvalMetricExpectedReciprocalRank { + + tmp := &_rankEvalMetricExpectedReciprocalRank{v: types.NewRankEvalMetricExpectedReciprocalRank()} + + tmp.MaximumRelevance(maximumrelevance) + + return tmp + +} + +// Sets the maximum number of documents retrieved per query. This value will act +// in place of the usual size parameter in the query. +func (s *_rankEvalMetricExpectedReciprocalRank) K(k int) *_rankEvalMetricExpectedReciprocalRank { + + s.v.K = &k + + return s +} + +// The highest relevance grade used in the user-supplied relevance judgments. +func (s *_rankEvalMetricExpectedReciprocalRank) MaximumRelevance(maximumrelevance int) *_rankEvalMetricExpectedReciprocalRank { + + s.v.MaximumRelevance = maximumrelevance + + return s +} + +func (s *_rankEvalMetricExpectedReciprocalRank) RankEvalMetricExpectedReciprocalRankCaster() *types.RankEvalMetricExpectedReciprocalRank { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetricmeanreciprocalrank.go b/typedapi/esdsl/rankevalmetricmeanreciprocalrank.go new file mode 100644 index 0000000000..09af9ce7db --- /dev/null +++ b/typedapi/esdsl/rankevalmetricmeanreciprocalrank.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetricMeanReciprocalRank struct { + v *types.RankEvalMetricMeanReciprocalRank +} + +func NewRankEvalMetricMeanReciprocalRank() *_rankEvalMetricMeanReciprocalRank { + + return &_rankEvalMetricMeanReciprocalRank{v: types.NewRankEvalMetricMeanReciprocalRank()} + +} + +// Sets the maximum number of documents retrieved per query. This value will act +// in place of the usual size parameter in the query. +func (s *_rankEvalMetricMeanReciprocalRank) K(k int) *_rankEvalMetricMeanReciprocalRank { + + s.v.K = &k + + return s +} + +// Sets the rating threshold above which documents are considered to be +// "relevant". +func (s *_rankEvalMetricMeanReciprocalRank) RelevantRatingThreshold(relevantratingthreshold int) *_rankEvalMetricMeanReciprocalRank { + + s.v.RelevantRatingThreshold = &relevantratingthreshold + + return s +} + +func (s *_rankEvalMetricMeanReciprocalRank) RankEvalMetricMeanReciprocalRankCaster() *types.RankEvalMetricMeanReciprocalRank { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetricprecision.go b/typedapi/esdsl/rankevalmetricprecision.go new file mode 100644 index 0000000000..d16625dd33 --- /dev/null +++ b/typedapi/esdsl/rankevalmetricprecision.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetricPrecision struct { + v *types.RankEvalMetricPrecision +} + +func NewRankEvalMetricPrecision() *_rankEvalMetricPrecision { + + return &_rankEvalMetricPrecision{v: types.NewRankEvalMetricPrecision()} + +} + +// Controls how unlabeled documents in the search results are counted. If set to +// true, unlabeled documents are ignored and neither count as relevant or +// irrelevant. Set to false (the default), they are treated as irrelevant. +func (s *_rankEvalMetricPrecision) IgnoreUnlabeled(ignoreunlabeled bool) *_rankEvalMetricPrecision { + + s.v.IgnoreUnlabeled = &ignoreunlabeled + + return s +} + +// Sets the maximum number of documents retrieved per query. This value will act +// in place of the usual size parameter in the query. +func (s *_rankEvalMetricPrecision) K(k int) *_rankEvalMetricPrecision { + + s.v.K = &k + + return s +} + +// Sets the rating threshold above which documents are considered to be +// "relevant". 
+func (s *_rankEvalMetricPrecision) RelevantRatingThreshold(relevantratingthreshold int) *_rankEvalMetricPrecision { + + s.v.RelevantRatingThreshold = &relevantratingthreshold + + return s +} + +func (s *_rankEvalMetricPrecision) RankEvalMetricPrecisionCaster() *types.RankEvalMetricPrecision { + return s.v +} diff --git a/typedapi/esdsl/rankevalmetricrecall.go b/typedapi/esdsl/rankevalmetricrecall.go new file mode 100644 index 0000000000..f2301f4da9 --- /dev/null +++ b/typedapi/esdsl/rankevalmetricrecall.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalMetricRecall struct { + v *types.RankEvalMetricRecall +} + +func NewRankEvalMetricRecall() *_rankEvalMetricRecall { + + return &_rankEvalMetricRecall{v: types.NewRankEvalMetricRecall()} + +} + +// Sets the maximum number of documents retrieved per query. This value will act +// in place of the usual size parameter in the query. 
+func (s *_rankEvalMetricRecall) K(k int) *_rankEvalMetricRecall { + + s.v.K = &k + + return s +} + +// Sets the rating threshold above which documents are considered to be +// "relevant". +func (s *_rankEvalMetricRecall) RelevantRatingThreshold(relevantratingthreshold int) *_rankEvalMetricRecall { + + s.v.RelevantRatingThreshold = &relevantratingthreshold + + return s +} + +func (s *_rankEvalMetricRecall) RankEvalMetricRecallCaster() *types.RankEvalMetricRecall { + return s.v +} diff --git a/typedapi/esdsl/rankevalquery.go b/typedapi/esdsl/rankevalquery.go new file mode 100644 index 0000000000..c7dae505d9 --- /dev/null +++ b/typedapi/esdsl/rankevalquery.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankEvalQuery struct { + v *types.RankEvalQuery +} + +func NewRankEvalQuery(query types.QueryVariant) *_rankEvalQuery { + + tmp := &_rankEvalQuery{v: types.NewRankEvalQuery()} + + tmp.Query(query) + + return tmp + +} + +func (s *_rankEvalQuery) Query(query types.QueryVariant) *_rankEvalQuery { + + s.v.Query = *query.QueryCaster() + + return s +} + +func (s *_rankEvalQuery) Size(size int) *_rankEvalQuery { + + s.v.Size = &size + + return s +} + +func (s *_rankEvalQuery) RankEvalQueryCaster() *types.RankEvalQuery { + return s.v +} diff --git a/typedapi/esdsl/rankevalrequestitem.go b/typedapi/esdsl/rankevalrequestitem.go new file mode 100644 index 0000000000..dfdd125643 --- /dev/null +++ b/typedapi/esdsl/rankevalrequestitem.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _rankEvalRequestItem struct { + v *types.RankEvalRequestItem +} + +func NewRankEvalRequestItem() *_rankEvalRequestItem { + + return &_rankEvalRequestItem{v: types.NewRankEvalRequestItem()} + +} + +// The search request’s ID, used to group result details later. +func (s *_rankEvalRequestItem) Id(id string) *_rankEvalRequestItem { + + s.v.Id = id + + return s +} + +// The search template parameters. +func (s *_rankEvalRequestItem) Params(params map[string]json.RawMessage) *_rankEvalRequestItem { + + s.v.Params = params + return s +} + +func (s *_rankEvalRequestItem) AddParam(key string, value json.RawMessage) *_rankEvalRequestItem { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// List of document ratings +func (s *_rankEvalRequestItem) Ratings(ratings ...types.DocumentRatingVariant) *_rankEvalRequestItem { + + for _, v := range ratings { + + s.v.Ratings = append(s.v.Ratings, *v.DocumentRatingCaster()) + + } + return s +} + +// The query being evaluated. 
+func (s *_rankEvalRequestItem) Request(request types.RankEvalQueryVariant) *_rankEvalRequestItem { + + s.v.Request = request.RankEvalQueryCaster() + + return s +} + +// The search template Id +func (s *_rankEvalRequestItem) TemplateId(id string) *_rankEvalRequestItem { + + s.v.TemplateId = &id + + return s +} + +func (s *_rankEvalRequestItem) RankEvalRequestItemCaster() *types.RankEvalRequestItem { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturefunctionlinear.go b/typedapi/esdsl/rankfeaturefunctionlinear.go new file mode 100644 index 0000000000..a21b0c11a9 --- /dev/null +++ b/typedapi/esdsl/rankfeaturefunctionlinear.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankFeatureFunctionLinear struct { + v *types.RankFeatureFunctionLinear +} + +func NewRankFeatureFunctionLinear() *_rankFeatureFunctionLinear { + + return &_rankFeatureFunctionLinear{v: types.NewRankFeatureFunctionLinear()} + +} + +func (s *_rankFeatureFunctionLinear) RankFeatureFunctionLinearCaster() *types.RankFeatureFunctionLinear { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturefunctionlogarithm.go b/typedapi/esdsl/rankfeaturefunctionlogarithm.go new file mode 100644 index 0000000000..58173af0f9 --- /dev/null +++ b/typedapi/esdsl/rankfeaturefunctionlogarithm.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankFeatureFunctionLogarithm struct { + v *types.RankFeatureFunctionLogarithm +} + +func NewRankFeatureFunctionLogarithm(scalingfactor float32) *_rankFeatureFunctionLogarithm { + + tmp := &_rankFeatureFunctionLogarithm{v: types.NewRankFeatureFunctionLogarithm()} + + tmp.ScalingFactor(scalingfactor) + + return tmp + +} + +// Configurable scaling factor. +func (s *_rankFeatureFunctionLogarithm) ScalingFactor(scalingfactor float32) *_rankFeatureFunctionLogarithm { + + s.v.ScalingFactor = scalingfactor + + return s +} + +func (s *_rankFeatureFunctionLogarithm) RankFeatureFunctionLogarithmCaster() *types.RankFeatureFunctionLogarithm { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturefunctionsaturation.go b/typedapi/esdsl/rankfeaturefunctionsaturation.go new file mode 100644 index 0000000000..f91d526291 --- /dev/null +++ b/typedapi/esdsl/rankfeaturefunctionsaturation.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankFeatureFunctionSaturation struct { + v *types.RankFeatureFunctionSaturation +} + +func NewRankFeatureFunctionSaturation() *_rankFeatureFunctionSaturation { + + return &_rankFeatureFunctionSaturation{v: types.NewRankFeatureFunctionSaturation()} + +} + +// Configurable pivot value so that the result will be less than 0.5. +func (s *_rankFeatureFunctionSaturation) Pivot(pivot float32) *_rankFeatureFunctionSaturation { + + s.v.Pivot = &pivot + + return s +} + +func (s *_rankFeatureFunctionSaturation) RankFeatureFunctionSaturationCaster() *types.RankFeatureFunctionSaturation { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturefunctionsigmoid.go b/typedapi/esdsl/rankfeaturefunctionsigmoid.go new file mode 100644 index 0000000000..f8e9411d6e --- /dev/null +++ b/typedapi/esdsl/rankfeaturefunctionsigmoid.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankFeatureFunctionSigmoid struct { + v *types.RankFeatureFunctionSigmoid +} + +func NewRankFeatureFunctionSigmoid(exponent float32, pivot float32) *_rankFeatureFunctionSigmoid { + + tmp := &_rankFeatureFunctionSigmoid{v: types.NewRankFeatureFunctionSigmoid()} + + tmp.Exponent(exponent) + + tmp.Pivot(pivot) + + return tmp + +} + +// Configurable Exponent. +func (s *_rankFeatureFunctionSigmoid) Exponent(exponent float32) *_rankFeatureFunctionSigmoid { + + s.v.Exponent = exponent + + return s +} + +// Configurable pivot value so that the result will be less than 0.5. +func (s *_rankFeatureFunctionSigmoid) Pivot(pivot float32) *_rankFeatureFunctionSigmoid { + + s.v.Pivot = pivot + + return s +} + +func (s *_rankFeatureFunctionSigmoid) RankFeatureFunctionSigmoidCaster() *types.RankFeatureFunctionSigmoid { + return s.v +} diff --git a/typedapi/esdsl/rankfeatureproperty.go b/typedapi/esdsl/rankfeatureproperty.go new file mode 100644 index 0000000000..f9a64bff7a --- /dev/null +++ b/typedapi/esdsl/rankfeatureproperty.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _rankFeatureProperty struct { + v *types.RankFeatureProperty +} + +func NewRankFeatureProperty() *_rankFeatureProperty { + + return &_rankFeatureProperty{v: types.NewRankFeatureProperty()} + +} + +func (s *_rankFeatureProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_rankFeatureProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_rankFeatureProperty) Fields(fields map[string]types.Property) *_rankFeatureProperty { + + s.v.Fields = fields + return s +} + +func (s *_rankFeatureProperty) AddField(key string, value types.PropertyVariant) *_rankFeatureProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_rankFeatureProperty) IgnoreAbove(ignoreabove int) *_rankFeatureProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_rankFeatureProperty) Meta(meta map[string]string) *_rankFeatureProperty { + + s.v.Meta = meta + return s +} + +func (s *_rankFeatureProperty) AddMeta(key string, value string) *_rankFeatureProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_rankFeatureProperty) PositiveScoreImpact(positivescoreimpact bool) *_rankFeatureProperty { + + s.v.PositiveScoreImpact = &positivescoreimpact + + return s +} + +func (s *_rankFeatureProperty) Properties(properties map[string]types.Property) *_rankFeatureProperty { + + s.v.Properties = properties + return s +} + +func (s *_rankFeatureProperty) AddProperty(key string, value types.PropertyVariant) *_rankFeatureProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_rankFeatureProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_rankFeatureProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_rankFeatureProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_rankFeatureProperty) RankFeaturePropertyCaster() *types.RankFeatureProperty { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturequery.go b/typedapi/esdsl/rankfeaturequery.go new file mode 100644 index 0000000000..1fcf4b5401 --- /dev/null +++ b/typedapi/esdsl/rankfeaturequery.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rankFeatureQuery struct { + v *types.RankFeatureQuery +} + +// Boosts the relevance score of documents based on the numeric value of a +// `rank_feature` or `rank_features` field. +func NewRankFeatureQuery() *_rankFeatureQuery { + + return &_rankFeatureQuery{v: types.NewRankFeatureQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_rankFeatureQuery) Boost(boost float32) *_rankFeatureQuery { + + s.v.Boost = &boost + + return s +} + +// `rank_feature` or `rank_features` field used to boost relevance scores. +func (s *_rankFeatureQuery) Field(field string) *_rankFeatureQuery { + + s.v.Field = field + + return s +} + +// Linear function used to boost relevance scores based on the value of the rank +// feature `field`. 
+func (s *_rankFeatureQuery) Linear(linear types.RankFeatureFunctionLinearVariant) *_rankFeatureQuery { + + s.v.Linear = linear.RankFeatureFunctionLinearCaster() + + return s +} + +// Logarithmic function used to boost relevance scores based on the value of the +// rank feature `field`. +func (s *_rankFeatureQuery) Log(log types.RankFeatureFunctionLogarithmVariant) *_rankFeatureQuery { + + s.v.Log = log.RankFeatureFunctionLogarithmCaster() + + return s +} + +func (s *_rankFeatureQuery) QueryName_(queryname_ string) *_rankFeatureQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Saturation function used to boost relevance scores based on the value of the +// rank feature `field`. +func (s *_rankFeatureQuery) Saturation(saturation types.RankFeatureFunctionSaturationVariant) *_rankFeatureQuery { + + s.v.Saturation = saturation.RankFeatureFunctionSaturationCaster() + + return s +} + +// Sigmoid function used to boost relevance scores based on the value of the +// rank feature `field`. +func (s *_rankFeatureQuery) Sigmoid(sigmoid types.RankFeatureFunctionSigmoidVariant) *_rankFeatureQuery { + + s.v.Sigmoid = sigmoid.RankFeatureFunctionSigmoidCaster() + + return s +} + +func (s *_rankFeatureQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.RankFeature = s.v + + return container +} + +func (s *_rankFeatureQuery) RankFeatureQueryCaster() *types.RankFeatureQuery { + return s.v +} diff --git a/typedapi/esdsl/rankfeaturesproperty.go b/typedapi/esdsl/rankfeaturesproperty.go new file mode 100644 index 0000000000..59e603dc3f --- /dev/null +++ b/typedapi/esdsl/rankfeaturesproperty.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _rankFeaturesProperty struct { + v *types.RankFeaturesProperty +} + +func NewRankFeaturesProperty() *_rankFeaturesProperty { + + return &_rankFeaturesProperty{v: types.NewRankFeaturesProperty()} + +} + +func (s *_rankFeaturesProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_rankFeaturesProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_rankFeaturesProperty) Fields(fields map[string]types.Property) *_rankFeaturesProperty { + + s.v.Fields = fields + return s +} + +func (s *_rankFeaturesProperty) AddField(key string, value types.PropertyVariant) *_rankFeaturesProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_rankFeaturesProperty) IgnoreAbove(ignoreabove int) *_rankFeaturesProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about 
the field. +func (s *_rankFeaturesProperty) Meta(meta map[string]string) *_rankFeaturesProperty { + + s.v.Meta = meta + return s +} + +func (s *_rankFeaturesProperty) AddMeta(key string, value string) *_rankFeaturesProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_rankFeaturesProperty) PositiveScoreImpact(positivescoreimpact bool) *_rankFeaturesProperty { + + s.v.PositiveScoreImpact = &positivescoreimpact + + return s +} + +func (s *_rankFeaturesProperty) Properties(properties map[string]types.Property) *_rankFeaturesProperty { + + s.v.Properties = properties + return s +} + +func (s *_rankFeaturesProperty) AddProperty(key string, value types.PropertyVariant) *_rankFeaturesProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_rankFeaturesProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_rankFeaturesProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_rankFeaturesProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_rankFeaturesProperty) RankFeaturesPropertyCaster() *types.RankFeaturesProperty { + return s.v +} diff --git a/typedapi/esdsl/raretermsaggregation.go b/typedapi/esdsl/raretermsaggregation.go new file mode 100644 index 0000000000..7b20c425f9 --- /dev/null +++ b/typedapi/esdsl/raretermsaggregation.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rareTermsAggregation struct { + v *types.RareTermsAggregation +} + +// A multi-bucket value source based aggregation which finds "rare" terms — +// terms that are at the long-tail of the distribution and are not frequent. +func NewRareTermsAggregation() *_rareTermsAggregation { + + return &_rareTermsAggregation{v: types.NewRareTermsAggregation()} + +} + +// Terms that should be excluded from the aggregation. +func (s *_rareTermsAggregation) Exclude(termsexcludes ...string) *_rareTermsAggregation { + + s.v.Exclude = termsexcludes + + return s +} + +// The field from which to return rare terms. +func (s *_rareTermsAggregation) Field(field string) *_rareTermsAggregation { + + s.v.Field = &field + + return s +} + +// Terms that should be included in the aggregation. +func (s *_rareTermsAggregation) Include(termsinclude types.TermsIncludeVariant) *_rareTermsAggregation { + + s.v.Include = *termsinclude.TermsIncludeCaster() + + return s +} + +// The maximum number of documents a term should appear in. 
+func (s *_rareTermsAggregation) MaxDocCount(maxdoccount int64) *_rareTermsAggregation { + + s.v.MaxDocCount = &maxdoccount + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_rareTermsAggregation) Missing(missing types.MissingVariant) *_rareTermsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// The precision of the internal CuckooFilters. +// Smaller precision leads to better approximation, but higher memory usage. +func (s *_rareTermsAggregation) Precision(precision types.Float64) *_rareTermsAggregation { + + s.v.Precision = &precision + + return s +} + +func (s *_rareTermsAggregation) ValueType(valuetype string) *_rareTermsAggregation { + + s.v.ValueType = &valuetype + + return s +} + +func (s *_rareTermsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.RareTerms = s.v + + return container +} + +func (s *_rareTermsAggregation) RareTermsAggregationCaster() *types.RareTermsAggregation { + return s.v +} diff --git a/typedapi/esdsl/rateaggregation.go b/typedapi/esdsl/rateaggregation.go new file mode 100644 index 0000000000..8280536437 --- /dev/null +++ b/typedapi/esdsl/rateaggregation.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode" +) + +type _rateAggregation struct { + v *types.RateAggregation +} + +// Calculates a rate of documents or a field in each bucket. +// Can only be used inside a `date_histogram` or `composite` aggregation. +func NewRateAggregation() *_rateAggregation { + + return &_rateAggregation{v: types.NewRateAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_rateAggregation) Field(field string) *_rateAggregation { + + s.v.Field = &field + + return s +} + +func (s *_rateAggregation) Format(format string) *_rateAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_rateAggregation) Missing(missing types.MissingVariant) *_rateAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// How the rate is calculated. +func (s *_rateAggregation) Mode(mode ratemode.RateMode) *_rateAggregation { + + s.v.Mode = &mode + return s +} + +func (s *_rateAggregation) Script(script types.ScriptVariant) *_rateAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// The interval used to calculate the rate. +// By default, the interval of the `date_histogram` is used. 
+func (s *_rateAggregation) Unit(unit calendarinterval.CalendarInterval) *_rateAggregation { + + s.v.Unit = &unit + return s +} + +func (s *_rateAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Rate = s.v + + return container +} + +func (s *_rateAggregation) RateAggregationCaster() *types.RateAggregation { + return s.v +} diff --git a/typedapi/esdsl/ratelimitsetting.go b/typedapi/esdsl/ratelimitsetting.go new file mode 100644 index 0000000000..1109ccc5c0 --- /dev/null +++ b/typedapi/esdsl/ratelimitsetting.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rateLimitSetting struct { + v *types.RateLimitSetting +} + +func NewRateLimitSetting() *_rateLimitSetting { + + return &_rateLimitSetting{v: types.NewRateLimitSetting()} + +} + +// The number of requests allowed per minute. 
+func (s *_rateLimitSetting) RequestsPerMinute(requestsperminute int) *_rateLimitSetting { + + s.v.RequestsPerMinute = &requestsperminute + + return s +} + +func (s *_rateLimitSetting) RateLimitSettingCaster() *types.RateLimitSetting { + return s.v +} diff --git a/typedapi/esdsl/readonlyurlrepository.go b/typedapi/esdsl/readonlyurlrepository.go new file mode 100644 index 0000000000..26012c5e10 --- /dev/null +++ b/typedapi/esdsl/readonlyurlrepository.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _readOnlyUrlRepository struct { + v *types.ReadOnlyUrlRepository +} + +func NewReadOnlyUrlRepository(settings types.ReadOnlyUrlRepositorySettingsVariant) *_readOnlyUrlRepository { + + tmp := &_readOnlyUrlRepository{v: types.NewReadOnlyUrlRepository()} + + tmp.Settings(settings) + + return tmp + +} + +// The repository settings. 
+func (s *_readOnlyUrlRepository) Settings(settings types.ReadOnlyUrlRepositorySettingsVariant) *_readOnlyUrlRepository { + + s.v.Settings = *settings.ReadOnlyUrlRepositorySettingsCaster() + + return s +} + +func (s *_readOnlyUrlRepository) Uuid(uuid string) *_readOnlyUrlRepository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_readOnlyUrlRepository) ReadOnlyUrlRepositoryCaster() *types.ReadOnlyUrlRepository { + return s.v +} diff --git a/typedapi/esdsl/readonlyurlrepositorysettings.go b/typedapi/esdsl/readonlyurlrepositorysettings.go new file mode 100644 index 0000000000..070d40078c --- /dev/null +++ b/typedapi/esdsl/readonlyurlrepositorysettings.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _readOnlyUrlRepositorySettings struct { + v *types.ReadOnlyUrlRepositorySettings +} + +func NewReadOnlyUrlRepositorySettings(url string) *_readOnlyUrlRepositorySettings { + + tmp := &_readOnlyUrlRepositorySettings{v: types.NewReadOnlyUrlRepositorySettings()} + + tmp.Url(url) + + return tmp + +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. +// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. +func (s *_readOnlyUrlRepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_readOnlyUrlRepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// When set to `true`, metadata files are stored in compressed format. +// This setting doesn't affect index files that are already compressed by +// default. +func (s *_readOnlyUrlRepositorySettings) Compress(compress bool) *_readOnlyUrlRepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The maximum number of retries for HTTP and HTTPS URLs. +func (s *_readOnlyUrlRepositorySettings) HttpMaxRetries(httpmaxretries int) *_readOnlyUrlRepositorySettings { + + s.v.HttpMaxRetries = &httpmaxretries + + return s +} + +// The maximum wait time for data transfers over a connection. 
+func (s *_readOnlyUrlRepositorySettings) HttpSocketTimeout(duration types.DurationVariant) *_readOnlyUrlRepositorySettings { + + s.v.HttpSocketTimeout = *duration.DurationCaster() + + return s +} + +// The maximum number of snapshots the repository can contain. +// The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. +func (s *_readOnlyUrlRepositorySettings) MaxNumberOfSnapshots(maxnumberofsnapshots int) *_readOnlyUrlRepositorySettings { + + s.v.MaxNumberOfSnapshots = &maxnumberofsnapshots + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_readOnlyUrlRepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_readOnlyUrlRepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_readOnlyUrlRepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_readOnlyUrlRepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The URL location of the root of the shared filesystem repository. +// The following protocols are supported: +// +// * `file` +// * `ftp` +// * `http` +// * `https` +// * `jar` +// +// URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with +// the `repositories.url.allowed_urls` cluster setting. +// This setting supports wildcards in the place of a host, path, query, or +// fragment in the URL. +// +// URLs using the file protocol must point to the location of a shared +// filesystem accessible to all master and data nodes in the cluster. +// This location must be registered in the `path.repo` setting. 
+// You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols +// in the `path.repo` setting. +func (s *_readOnlyUrlRepositorySettings) Url(url string) *_readOnlyUrlRepositorySettings { + + s.v.Url = url + + return s +} + +func (s *_readOnlyUrlRepositorySettings) ReadOnlyUrlRepositorySettingsCaster() *types.ReadOnlyUrlRepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/redactprocessor.go b/typedapi/esdsl/redactprocessor.go new file mode 100644 index 0000000000..4edc36dfb6 --- /dev/null +++ b/typedapi/esdsl/redactprocessor.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _redactProcessor struct { + v *types.RedactProcessor +} + +// The Redact processor uses the Grok rules engine to obscure text in the input +// document matching the given Grok patterns. +// The processor can be used to obscure Personal Identifying Information (PII) +// by configuring it to detect known patterns such as email or IP addresses. 
+// Text that matches a Grok pattern is replaced with a configurable string such +// as `<EMAIL>` where an email address is matched or simply replace all matches +// with the text `<REDACTED>` if preferred. +func NewRedactProcessor() *_redactProcessor { + + return &_redactProcessor{v: types.NewRedactProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_redactProcessor) Description(description string) *_redactProcessor { + + s.v.Description = &description + + return s +} + +// The field to be redacted +func (s *_redactProcessor) Field(field string) *_redactProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_redactProcessor) If(if_ types.ScriptVariant) *_redactProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_redactProcessor) IgnoreFailure(ignorefailure bool) *_redactProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_redactProcessor) IgnoreMissing(ignoremissing bool) *_redactProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor.
+func (s *_redactProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_redactProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +func (s *_redactProcessor) PatternDefinitions(patterndefinitions map[string]string) *_redactProcessor { + + s.v.PatternDefinitions = patterndefinitions + return s +} + +func (s *_redactProcessor) AddPatternDefinition(key string, value string) *_redactProcessor { + + var tmp map[string]string + if s.v.PatternDefinitions == nil { + s.v.PatternDefinitions = make(map[string]string) + } else { + tmp = s.v.PatternDefinitions + } + + tmp[key] = value + + s.v.PatternDefinitions = tmp + return s +} + +// A list of grok expressions to match and redact named captures with +func (s *_redactProcessor) Patterns(patterns ...string) *_redactProcessor { + + for _, v := range patterns { + + s.v.Patterns = append(s.v.Patterns, v) + + } + return s +} + +// Start a redacted section with this token +func (s *_redactProcessor) Prefix(prefix string) *_redactProcessor { + + s.v.Prefix = &prefix + + return s +} + +// If `true` and the current license does not support running redact processors, +// then the processor quietly exits without modifying the document +func (s *_redactProcessor) SkipIfUnlicensed(skipifunlicensed bool) *_redactProcessor { + + s.v.SkipIfUnlicensed = &skipifunlicensed + + return s +} + +// End a redacted section with this token +func (s *_redactProcessor) Suffix(suffix string) *_redactProcessor { + + s.v.Suffix = &suffix + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_redactProcessor) Tag(tag string) *_redactProcessor { + + s.v.Tag = &tag + + return s +} + +// If `true` then ingest metadata `_ingest._redact._is_redacted` is set to +// `true` if the document has been redacted +func (s *_redactProcessor) TraceRedact(traceredact bool) *_redactProcessor { + + s.v.TraceRedact = &traceredact + + return s +} + +func (s *_redactProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Redact = s.v + + return container +} + +func (s *_redactProcessor) RedactProcessorCaster() *types.RedactProcessor { + return s.v +} diff --git a/typedapi/esdsl/regexoptions.go b/typedapi/esdsl/regexoptions.go new file mode 100644 index 0000000000..137d178f3c --- /dev/null +++ b/typedapi/esdsl/regexoptions.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _regexOptions struct { + v *types.RegexOptions +} + +func NewRegexOptions() *_regexOptions { + + return &_regexOptions{v: types.NewRegexOptions()} + +} + +// Optional operators for the regular expression. +func (s *_regexOptions) Flags(flags string) *_regexOptions { + + s.v.Flags = flags + + return s +} + +// Maximum number of automaton states required for the query. +func (s *_regexOptions) MaxDeterminizedStates(maxdeterminizedstates int) *_regexOptions { + + s.v.MaxDeterminizedStates = &maxdeterminizedstates + + return s +} + +func (s *_regexOptions) RegexOptionsCaster() *types.RegexOptions { + return s.v +} diff --git a/typedapi/esdsl/regexpquery.go b/typedapi/esdsl/regexpquery.go new file mode 100644 index 0000000000..ef3c01f801 --- /dev/null +++ b/typedapi/esdsl/regexpquery.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _regexpQuery struct { + k string + v *types.RegexpQuery +} + +// Returns documents that contain terms matching a regular expression. +func NewRegexpQuery(field string, value string) *_regexpQuery { + tmp := &_regexpQuery{ + k: field, + v: types.NewRegexpQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_regexpQuery) Boost(boost float32) *_regexpQuery { + + s.v.Boost = &boost + + return s +} + +// Allows case insensitive matching of the regular expression value with the +// indexed field values when set to `true`. +// When `false`, case sensitivity of matching depends on the underlying field’s +// mapping. +func (s *_regexpQuery) CaseInsensitive(caseinsensitive bool) *_regexpQuery { + + s.v.CaseInsensitive = &caseinsensitive + + return s +} + +// Enables optional operators for the regular expression. +func (s *_regexpQuery) Flags(flags string) *_regexpQuery { + + s.v.Flags = &flags + + return s +} + +// Maximum number of automaton states required for the query. +func (s *_regexpQuery) MaxDeterminizedStates(maxdeterminizedstates int) *_regexpQuery { + + s.v.MaxDeterminizedStates = &maxdeterminizedstates + + return s +} + +func (s *_regexpQuery) QueryName_(queryname_ string) *_regexpQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Method used to rewrite the query. +func (s *_regexpQuery) Rewrite(multitermqueryrewrite string) *_regexpQuery { + + s.v.Rewrite = &multitermqueryrewrite + + return s +} + +// Regular expression for terms you wish to find in the provided field. 
+func (s *_regexpQuery) Value(value string) *_regexpQuery { + + s.v.Value = value + + return s +} + +func (s *_regexpQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Regexp = map[string]types.RegexpQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleRegexpQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleRegexpQuery() *_regexpQuery { + return &_regexpQuery{ + k: "", + v: types.NewRegexpQuery(), + } +} + +func (s *_regexpQuery) RegexpQueryCaster() *types.RegexpQuery { + return s.v.RegexpQueryCaster() +} diff --git a/typedapi/esdsl/regexvalidation.go b/typedapi/esdsl/regexvalidation.go new file mode 100644 index 0000000000..5e2287fca1 --- /dev/null +++ b/typedapi/esdsl/regexvalidation.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _regexValidation struct { + v *types.RegexValidation +} + +func NewRegexValidation(constraint string) *_regexValidation { + + tmp := &_regexValidation{v: types.NewRegexValidation()} + + tmp.Constraint(constraint) + + return tmp + +} + +func (s *_regexValidation) Constraint(constraint string) *_regexValidation { + + s.v.Constraint = constraint + + return s +} + +func (s *_regexValidation) RegexValidationCaster() *types.RegexValidation { + return s.v +} diff --git a/typedapi/esdsl/registereddomainprocessor.go b/typedapi/esdsl/registereddomainprocessor.go new file mode 100644 index 0000000000..003a508e11 --- /dev/null +++ b/typedapi/esdsl/registereddomainprocessor.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _registeredDomainProcessor struct { + v *types.RegisteredDomainProcessor +} + +// Extracts the registered domain (also known as the effective top-level +// domain or eTLD), sub-domain, and top-level domain from a fully qualified +// domain name (FQDN). Uses the registered domains defined in the Mozilla +// Public Suffix List. +func NewRegisteredDomainProcessor() *_registeredDomainProcessor { + + return &_registeredDomainProcessor{v: types.NewRegisteredDomainProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_registeredDomainProcessor) Description(description string) *_registeredDomainProcessor { + + s.v.Description = &description + + return s +} + +// Field containing the source FQDN. +func (s *_registeredDomainProcessor) Field(field string) *_registeredDomainProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_registeredDomainProcessor) If(if_ types.ScriptVariant) *_registeredDomainProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_registeredDomainProcessor) IgnoreFailure(ignorefailure bool) *_registeredDomainProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If true and any required fields are missing, the processor quietly exits +// without modifying the document. +func (s *_registeredDomainProcessor) IgnoreMissing(ignoremissing bool) *_registeredDomainProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. 
+func (s *_registeredDomainProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_registeredDomainProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_registeredDomainProcessor) Tag(tag string) *_registeredDomainProcessor { + + s.v.Tag = &tag + + return s +} + +// Object field containing extracted domain components. If an empty string, +// the processor adds components to the document’s root. +func (s *_registeredDomainProcessor) TargetField(field string) *_registeredDomainProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_registeredDomainProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.RegisteredDomain = s.v + + return container +} + +func (s *_registeredDomainProcessor) RegisteredDomainProcessorCaster() *types.RegisteredDomainProcessor { + return s.v +} diff --git a/typedapi/esdsl/regressioninferenceoptions.go b/typedapi/esdsl/regressioninferenceoptions.go new file mode 100644 index 0000000000..aea077bbd8 --- /dev/null +++ b/typedapi/esdsl/regressioninferenceoptions.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _regressionInferenceOptions struct { + v *types.RegressionInferenceOptions +} + +// Regression configuration for inference. +func NewRegressionInferenceOptions() *_regressionInferenceOptions { + + return &_regressionInferenceOptions{v: types.NewRegressionInferenceOptions()} + +} + +// Specifies the maximum number of feature importance values per document. +func (s *_regressionInferenceOptions) NumTopFeatureImportanceValues(numtopfeatureimportancevalues int) *_regressionInferenceOptions { + + s.v.NumTopFeatureImportanceValues = &numtopfeatureimportancevalues + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_regressionInferenceOptions) ResultsField(field string) *_regressionInferenceOptions { + + s.v.ResultsField = &field + + return s +} + +func (s *_regressionInferenceOptions) InferenceConfigContainerCaster() *types.InferenceConfigContainer { + container := types.NewInferenceConfigContainer() + + container.Regression = s.v + + return container +} + +func (s *_regressionInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.Regression = s.v + + return container +} + +func (s *_regressionInferenceOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.Regression = s.v + + return container +} + +func (s *_regressionInferenceOptions) RegressionInferenceOptionsCaster() *types.RegressionInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/reindexdestination.go b/typedapi/esdsl/reindexdestination.go new file mode 100644 index 0000000000..98f22e7c3f --- /dev/null +++ b/typedapi/esdsl/reindexdestination.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _reindexDestination struct { + v *types.ReindexDestination +} + +func NewReindexDestination() *_reindexDestination { + + return &_reindexDestination{v: types.NewReindexDestination()} + +} + +// The name of the data stream, index, or index alias you are copying to. +func (s *_reindexDestination) Index(indexname string) *_reindexDestination { + + s.v.Index = indexname + + return s +} + +// If it is `create`, the operation will only index documents that do not +// already exist (also known as "put if absent"). +// +// IMPORTANT: To reindex to a data stream destination, this argument must be +// `create`. +func (s *_reindexDestination) OpType(optype optype.OpType) *_reindexDestination { + + s.v.OpType = &optype + return s +} + +// The name of the pipeline to use. +func (s *_reindexDestination) Pipeline(pipeline string) *_reindexDestination { + + s.v.Pipeline = &pipeline + + return s +} + +// By default, a document's routing is preserved unless it's changed by the +// script. +// If it is `keep`, the routing on the bulk request sent for each match is set +// to the routing on the match. +// If it is `discard`, the routing on the bulk request sent for each match is +// set to `null`. +// If it is `=value`, the routing on the bulk request sent for each match is set +// to all value specified after the equals sign (`=`). +func (s *_reindexDestination) Routing(routing string) *_reindexDestination { + + s.v.Routing = &routing + + return s +} + +// The versioning to use for the indexing operation. 
+func (s *_reindexDestination) VersionType(versiontype versiontype.VersionType) *_reindexDestination { + + s.v.VersionType = &versiontype + return s +} + +func (s *_reindexDestination) ReindexDestinationCaster() *types.ReindexDestination { + return s.v +} diff --git a/typedapi/esdsl/reindexsource.go b/typedapi/esdsl/reindexsource.go new file mode 100644 index 0000000000..ddc56b66fa --- /dev/null +++ b/typedapi/esdsl/reindexsource.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _reindexSource struct { + v *types.ReindexSource +} + +func NewReindexSource() *_reindexSource { + + return &_reindexSource{v: types.NewReindexSource()} + +} + +// The name of the data stream, index, or alias you are copying from. +// It accepts a comma-separated list to reindex from multiple sources. 
+func (s *_reindexSource) Index(indices ...string) *_reindexSource { + + s.v.Index = indices + + return s +} + +// The documents to reindex, which is defined with Query DSL. +func (s *_reindexSource) Query(query types.QueryVariant) *_reindexSource { + + s.v.Query = query.QueryCaster() + + return s +} + +// A remote instance of Elasticsearch that you want to index from. +func (s *_reindexSource) Remote(remote types.RemoteSourceVariant) *_reindexSource { + + s.v.Remote = remote.RemoteSourceCaster() + + return s +} + +func (s *_reindexSource) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *_reindexSource { + + s.v.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return s +} + +// The number of documents to index per batch. +// Use it when you are indexing from remote to ensure that the batches fit +// within the on-heap buffer, which defaults to a maximum size of 100 MB. +func (s *_reindexSource) Size(size int) *_reindexSource { + + s.v.Size = &size + + return s +} + +// Slice the reindex request manually using the provided slice ID and total +// number of slices. +func (s *_reindexSource) Slice(slice types.SlicedScrollVariant) *_reindexSource { + + s.v.Slice = slice.SlicedScrollCaster() + + return s +} + +// A comma-separated list of `:` pairs to sort by before +// indexing. +// Use it in conjunction with `max_docs` to control what documents are +// reindexed. +// +// WARNING: Sort in reindex is deprecated. +// Sorting in reindex was never guaranteed to index documents in order and +// prevents further development of reindex such as resilience and performance +// improvements. +// If used in combination with `max_docs`, consider using a query filter +// instead. +func (s *_reindexSource) Sort(sorts ...types.SortCombinationsVariant) *_reindexSource { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +// If `true`, reindex all source fields. +// Set it to a list to reindex select fields. 
+func (s *_reindexSource) SourceFields_(fields ...string) *_reindexSource { + + s.v.SourceFields_ = fields + + return s +} + +func (s *_reindexSource) ReindexSourceCaster() *types.ReindexSource { + return s.v +} diff --git a/typedapi/esdsl/remoteclusterprivileges.go b/typedapi/esdsl/remoteclusterprivileges.go new file mode 100644 index 0000000000..489afe197e --- /dev/null +++ b/typedapi/esdsl/remoteclusterprivileges.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/remoteclusterprivilege" +) + +type _remoteClusterPrivileges struct { + v *types.RemoteClusterPrivileges +} + +func NewRemoteClusterPrivileges() *_remoteClusterPrivileges { + + return &_remoteClusterPrivileges{v: types.NewRemoteClusterPrivileges()} + +} + +// A list of cluster aliases to which the permissions in this entry apply. 
+func (s *_remoteClusterPrivileges) Clusters(names ...string) *_remoteClusterPrivileges { + + s.v.Clusters = names + + return s +} + +// The cluster level privileges that owners of the role have on the remote +// cluster. +func (s *_remoteClusterPrivileges) Privileges(privileges ...remoteclusterprivilege.RemoteClusterPrivilege) *_remoteClusterPrivileges { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +func (s *_remoteClusterPrivileges) RemoteClusterPrivilegesCaster() *types.RemoteClusterPrivileges { + return s.v +} diff --git a/typedapi/esdsl/remoteindicesprivileges.go b/typedapi/esdsl/remoteindicesprivileges.go new file mode 100644 index 0000000000..ddf3441f8d --- /dev/null +++ b/typedapi/esdsl/remoteindicesprivileges.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +type _remoteIndicesPrivileges struct { + v *types.RemoteIndicesPrivileges +} + +func NewRemoteIndicesPrivileges() *_remoteIndicesPrivileges { + + return &_remoteIndicesPrivileges{v: types.NewRemoteIndicesPrivileges()} + +} + +// Set to `true` if using wildcard or regular expressions for patterns that +// cover restricted indices. Implicitly, restricted indices have limited +// privileges that can cause pattern tests to fail. If restricted indices are +// explicitly included in the `names` list, Elasticsearch checks privileges +// against these indices regardless of the value set for +// `allow_restricted_indices`. +func (s *_remoteIndicesPrivileges) AllowRestrictedIndices(allowrestrictedindices bool) *_remoteIndicesPrivileges { + + s.v.AllowRestrictedIndices = &allowrestrictedindices + + return s +} + +// A list of cluster aliases to which the permissions in this entry apply. +func (s *_remoteIndicesPrivileges) Clusters(names ...string) *_remoteIndicesPrivileges { + + s.v.Clusters = names + + return s +} + +// The document fields that the owners of the role have read access to. +func (s *_remoteIndicesPrivileges) FieldSecurity(fieldsecurity types.FieldSecurityVariant) *_remoteIndicesPrivileges { + + s.v.FieldSecurity = fieldsecurity.FieldSecurityCaster() + + return s +} + +// A list of indices (or index name patterns) to which the permissions in this +// entry apply. +func (s *_remoteIndicesPrivileges) Names(names ...string) *_remoteIndicesPrivileges { + + s.v.Names = make([]string, len(names)) + s.v.Names = names + + return s +} + +// The index level privileges that owners of the role have on the specified +// indices. 
+func (s *_remoteIndicesPrivileges) Privileges(privileges ...indexprivilege.IndexPrivilege) *_remoteIndicesPrivileges { + + for _, v := range privileges { + + s.v.Privileges = append(s.v.Privileges, v) + + } + return s +} + +// A search query that defines the documents the owners of the role have access +// to. A document within the specified indices must match this query for it to +// be accessible by the owners of the role. +func (s *_remoteIndicesPrivileges) Query(indicesprivilegesquery types.IndicesPrivilegesQueryVariant) *_remoteIndicesPrivileges { + + s.v.Query = *indicesprivilegesquery.IndicesPrivilegesQueryCaster() + + return s +} + +func (s *_remoteIndicesPrivileges) RemoteIndicesPrivilegesCaster() *types.RemoteIndicesPrivileges { + return s.v +} diff --git a/typedapi/esdsl/remotesource.go b/typedapi/esdsl/remotesource.go new file mode 100644 index 0000000000..8e2182a35f --- /dev/null +++ b/typedapi/esdsl/remotesource.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _remoteSource struct { + v *types.RemoteSource +} + +func NewRemoteSource() *_remoteSource { + + return &_remoteSource{v: types.NewRemoteSource()} + +} + +// The remote connection timeout. +func (s *_remoteSource) ConnectTimeout(duration types.DurationVariant) *_remoteSource { + + s.v.ConnectTimeout = *duration.DurationCaster() + + return s +} + +// An object containing the headers of the request. +func (s *_remoteSource) Headers(headers map[string]string) *_remoteSource { + + s.v.Headers = headers + return s +} + +func (s *_remoteSource) AddHeader(key string, value string) *_remoteSource { + + var tmp map[string]string + if s.v.Headers == nil { + s.v.Headers = make(map[string]string) + } else { + tmp = s.v.Headers + } + + tmp[key] = value + + s.v.Headers = tmp + return s +} + +// The URL for the remote instance of Elasticsearch that you want to index from. +// This information is required when you're indexing from remote. +func (s *_remoteSource) Host(host string) *_remoteSource { + + s.v.Host = host + + return s +} + +// The password to use for authentication with the remote host. +func (s *_remoteSource) Password(password string) *_remoteSource { + + s.v.Password = &password + + return s +} + +// The remote socket read timeout. +func (s *_remoteSource) SocketTimeout(duration types.DurationVariant) *_remoteSource { + + s.v.SocketTimeout = *duration.DurationCaster() + + return s +} + +// The username to use for authentication with the remote host. 
+func (s *_remoteSource) Username(username string) *_remoteSource { + + s.v.Username = &username + + return s +} + +func (s *_remoteSource) RemoteSourceCaster() *types.RemoteSource { + return s.v +} diff --git a/typedapi/esdsl/removeaction.go b/typedapi/esdsl/removeaction.go new file mode 100644 index 0000000000..6efe499615 --- /dev/null +++ b/typedapi/esdsl/removeaction.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _removeAction struct { + v *types.RemoveAction +} + +// Removes a data stream or index from an alias. +func NewRemoveAction() *_removeAction { + + return &_removeAction{v: types.NewRemoveAction()} + +} + +// Alias for the action. +// Index alias names support date math. +func (s *_removeAction) Alias(indexalias string) *_removeAction { + + s.v.Alias = &indexalias + + return s +} + +// Aliases for the action. +// Index alias names support date math. 
+func (s *_removeAction) Aliases(aliases ...string) *_removeAction { + + s.v.Aliases = make([]string, len(aliases)) + s.v.Aliases = aliases + + return s +} + +// Data stream or index for the action. +// Supports wildcards (`*`). +func (s *_removeAction) Index(indexname string) *_removeAction { + + s.v.Index = &indexname + + return s +} + +// Data streams or indices for the action. +// Supports wildcards (`*`). +func (s *_removeAction) Indices(indices ...string) *_removeAction { + + s.v.Indices = indices + + return s +} + +// If `true`, the alias must exist to perform the action. +func (s *_removeAction) MustExist(mustexist bool) *_removeAction { + + s.v.MustExist = &mustexist + + return s +} + +func (s *_removeAction) IndicesActionCaster() *types.IndicesAction { + container := types.NewIndicesAction() + + container.Remove = s.v + + return container +} + +func (s *_removeAction) RemoveActionCaster() *types.RemoveAction { + return s.v +} diff --git a/typedapi/esdsl/removeduplicatestokenfilter.go b/typedapi/esdsl/removeduplicatestokenfilter.go new file mode 100644 index 0000000000..faeaaa6971 --- /dev/null +++ b/typedapi/esdsl/removeduplicatestokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _removeDuplicatesTokenFilter struct { + v *types.RemoveDuplicatesTokenFilter +} + +func NewRemoveDuplicatesTokenFilter() *_removeDuplicatesTokenFilter { + + return &_removeDuplicatesTokenFilter{v: types.NewRemoveDuplicatesTokenFilter()} + +} + +func (s *_removeDuplicatesTokenFilter) Version(versionstring string) *_removeDuplicatesTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_removeDuplicatesTokenFilter) RemoveDuplicatesTokenFilterCaster() *types.RemoveDuplicatesTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/removeindexaction.go b/typedapi/esdsl/removeindexaction.go new file mode 100644 index 0000000000..cf21f9a732 --- /dev/null +++ b/typedapi/esdsl/removeindexaction.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _removeIndexAction struct { + v *types.RemoveIndexAction +} + +// Deletes an index. +// You cannot use this action on aliases or data streams. +func NewRemoveIndexAction() *_removeIndexAction { + + return &_removeIndexAction{v: types.NewRemoveIndexAction()} + +} + +// Data stream or index for the action. +// Supports wildcards (`*`). +func (s *_removeIndexAction) Index(indexname string) *_removeIndexAction { + + s.v.Index = &indexname + + return s +} + +// Data streams or indices for the action. +// Supports wildcards (`*`). +func (s *_removeIndexAction) Indices(indices ...string) *_removeIndexAction { + + s.v.Indices = indices + + return s +} + +// If `true`, the alias must exist to perform the action. +func (s *_removeIndexAction) MustExist(mustexist bool) *_removeIndexAction { + + s.v.MustExist = &mustexist + + return s +} + +func (s *_removeIndexAction) IndicesActionCaster() *types.IndicesAction { + container := types.NewIndicesAction() + + container.RemoveIndex = s.v + + return container +} + +func (s *_removeIndexAction) RemoveIndexActionCaster() *types.RemoveIndexAction { + return s.v +} diff --git a/typedapi/esdsl/removeprocessor.go b/typedapi/esdsl/removeprocessor.go new file mode 100644 index 0000000000..ac4d09fd1e --- /dev/null +++ b/typedapi/esdsl/removeprocessor.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _removeProcessor struct { + v *types.RemoveProcessor +} + +// Removes existing fields. +// If one field doesn’t exist, an exception will be thrown. +func NewRemoveProcessor() *_removeProcessor { + + return &_removeProcessor{v: types.NewRemoveProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_removeProcessor) Description(description string) *_removeProcessor { + + s.v.Description = &description + + return s +} + +// Fields to be removed. Supports template snippets. +func (s *_removeProcessor) Field(fields ...string) *_removeProcessor { + + s.v.Field = fields + + return s +} + +// Conditionally execute the processor. +func (s *_removeProcessor) If(if_ types.ScriptVariant) *_removeProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_removeProcessor) IgnoreFailure(ignorefailure bool) *_removeProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_removeProcessor) IgnoreMissing(ignoremissing bool) *_removeProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Fields to be kept. 
When set, all fields other than those specified are +// removed. +func (s *_removeProcessor) Keep(fields ...string) *_removeProcessor { + + s.v.Keep = fields + + return s +} + +// Handle failures for the processor. +func (s *_removeProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_removeProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_removeProcessor) Tag(tag string) *_removeProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_removeProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Remove = s.v + + return container +} + +func (s *_removeProcessor) RemoveProcessorCaster() *types.RemoveProcessor { + return s.v +} diff --git a/typedapi/esdsl/renameprocessor.go b/typedapi/esdsl/renameprocessor.go new file mode 100644 index 0000000000..6d18c1a1dd --- /dev/null +++ b/typedapi/esdsl/renameprocessor.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _renameProcessor struct { + v *types.RenameProcessor +} + +// Renames an existing field. +// If the field doesn’t exist or the new name is already used, an exception will +// be thrown. +func NewRenameProcessor() *_renameProcessor { + + return &_renameProcessor{v: types.NewRenameProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_renameProcessor) Description(description string) *_renameProcessor { + + s.v.Description = &description + + return s +} + +// The field to be renamed. +// Supports template snippets. +func (s *_renameProcessor) Field(field string) *_renameProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_renameProcessor) If(if_ types.ScriptVariant) *_renameProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_renameProcessor) IgnoreFailure(ignorefailure bool) *_renameProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_renameProcessor) IgnoreMissing(ignoremissing bool) *_renameProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_renameProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_renameProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_renameProcessor) Tag(tag string) *_renameProcessor { + + s.v.Tag = &tag + + return s +} + +// The new name of the field. 
+// Supports template snippets. +func (s *_renameProcessor) TargetField(field string) *_renameProcessor { + + s.v.TargetField = field + + return s +} + +func (s *_renameProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Rename = s.v + + return container +} + +func (s *_renameProcessor) RenameProcessorCaster() *types.RenameProcessor { + return s.v +} diff --git a/typedapi/esdsl/replicationaccess.go b/typedapi/esdsl/replicationaccess.go new file mode 100644 index 0000000000..d7cb9839bb --- /dev/null +++ b/typedapi/esdsl/replicationaccess.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _replicationAccess struct { + v *types.ReplicationAccess +} + +func NewReplicationAccess() *_replicationAccess { + + return &_replicationAccess{v: types.NewReplicationAccess()} + +} + +// This needs to be set to true if the patterns in the names field should cover +// system indices. 
+func (s *_replicationAccess) AllowRestrictedIndices(allowrestrictedindices bool) *_replicationAccess { + + s.v.AllowRestrictedIndices = &allowrestrictedindices + + return s +} + +// A list of indices (or index name patterns) to which the permissions in this +// entry apply. +func (s *_replicationAccess) Names(names ...string) *_replicationAccess { + + s.v.Names = make([]string, len(names)) + s.v.Names = names + + return s +} + +func (s *_replicationAccess) ReplicationAccessCaster() *types.ReplicationAccess { + return s.v +} diff --git a/typedapi/esdsl/reportingemailattachment.go b/typedapi/esdsl/reportingemailattachment.go new file mode 100644 index 0000000000..a22f836586 --- /dev/null +++ b/typedapi/esdsl/reportingemailattachment.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _reportingEmailAttachment struct { + v *types.ReportingEmailAttachment +} + +func NewReportingEmailAttachment(url string) *_reportingEmailAttachment { + + tmp := &_reportingEmailAttachment{v: types.NewReportingEmailAttachment()} + + tmp.Url(url) + + return tmp + +} + +func (s *_reportingEmailAttachment) Inline(inline bool) *_reportingEmailAttachment { + + s.v.Inline = &inline + + return s +} + +func (s *_reportingEmailAttachment) Interval(duration types.DurationVariant) *_reportingEmailAttachment { + + s.v.Interval = *duration.DurationCaster() + + return s +} + +func (s *_reportingEmailAttachment) Request(request types.HttpInputRequestDefinitionVariant) *_reportingEmailAttachment { + + s.v.Request = request.HttpInputRequestDefinitionCaster() + + return s +} + +func (s *_reportingEmailAttachment) Retries(retries int) *_reportingEmailAttachment { + + s.v.Retries = &retries + + return s +} + +func (s *_reportingEmailAttachment) Url(url string) *_reportingEmailAttachment { + + s.v.Url = url + + return s +} + +func (s *_reportingEmailAttachment) EmailAttachmentContainerCaster() *types.EmailAttachmentContainer { + container := types.NewEmailAttachmentContainer() + + container.Reporting = s.v + + return container +} + +func (s *_reportingEmailAttachment) ReportingEmailAttachmentCaster() *types.ReportingEmailAttachment { + return s.v +} diff --git a/typedapi/esdsl/repository.go b/typedapi/esdsl/repository.go new file mode 100644 index 0000000000..c14e4c7365 --- /dev/null +++ b/typedapi/esdsl/repository.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _repository struct { + v types.Repository +} + +func NewRepository() *_repository { + return &_repository{v: nil} +} + +func (u *_repository) AzureRepository(azurerepository types.AzureRepositoryVariant) *_repository { + + u.v = &azurerepository + + return u +} + +// Interface implementation for AzureRepository in Repository union +func (u *_azureRepository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u *_repository) GcsRepository(gcsrepository types.GcsRepositoryVariant) *_repository { + + u.v = &gcsrepository + + return u +} + +// Interface implementation for GcsRepository in Repository union +func (u *_gcsRepository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u *_repository) S3Repository(s3repository types.S3RepositoryVariant) *_repository { + + u.v = &s3repository + + return u +} + +// Interface implementation for S3Repository in Repository union +func (u *_s3Repository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u 
*_repository) SharedFileSystemRepository(sharedfilesystemrepository types.SharedFileSystemRepositoryVariant) *_repository { + + u.v = &sharedfilesystemrepository + + return u +} + +// Interface implementation for SharedFileSystemRepository in Repository union +func (u *_sharedFileSystemRepository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u *_repository) ReadOnlyUrlRepository(readonlyurlrepository types.ReadOnlyUrlRepositoryVariant) *_repository { + + u.v = &readonlyurlrepository + + return u +} + +// Interface implementation for ReadOnlyUrlRepository in Repository union +func (u *_readOnlyUrlRepository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u *_repository) SourceOnlyRepository(sourceonlyrepository types.SourceOnlyRepositoryVariant) *_repository { + + u.v = &sourceonlyrepository + + return u +} + +// Interface implementation for SourceOnlyRepository in Repository union +func (u *_sourceOnlyRepository) RepositoryCaster() *types.Repository { + t := types.Repository(u.v) + return &t +} + +func (u *_repository) RepositoryCaster() *types.Repository { + return &u.v +} diff --git a/typedapi/esdsl/requestitem.go b/typedapi/esdsl/requestitem.go new file mode 100644 index 0000000000..e26fadcd3d --- /dev/null +++ b/typedapi/esdsl/requestitem.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _requestItem struct { + v types.RequestItem +} + +func NewRequestItem() *_requestItem { + return &_requestItem{v: nil} +} + +func (u *_requestItem) MultisearchHeader(multisearchheader types.MultisearchHeaderVariant) *_requestItem { + + u.v = &multisearchheader + + return u +} + +// Interface implementation for MultisearchHeader in RequestItem union +func (u *_multisearchHeader) RequestItemCaster() *types.RequestItem { + t := types.RequestItem(u.v) + return &t +} + +func (u *_requestItem) TemplateConfig(templateconfig types.TemplateConfigVariant) *_requestItem { + + u.v = &templateconfig + + return u +} + +// Interface implementation for TemplateConfig in RequestItem union +func (u *_templateConfig) RequestItemCaster() *types.RequestItem { + t := types.RequestItem(u.v) + return &t +} + +func (u *_requestItem) RequestItemCaster() *types.RequestItem { + return &u.v +} diff --git a/typedapi/esdsl/rerouteprocessor.go b/typedapi/esdsl/rerouteprocessor.go new file mode 100644 index 0000000000..4ec8d1abdb --- /dev/null +++ b/typedapi/esdsl/rerouteprocessor.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rerouteProcessor struct { + v *types.RerouteProcessor +} + +// Routes a document to another target index or data stream. +// When setting the `destination` option, the target is explicitly specified and +// the dataset and namespace options can’t be set. +// When the `destination` option is not set, this processor is in a data stream +// mode. Note that in this mode, the reroute processor can only be used on data +// streams that follow the data stream naming scheme. +func NewRerouteProcessor() *_rerouteProcessor { + + return &_rerouteProcessor{v: types.NewRerouteProcessor()} + +} + +// Field references or a static value for the dataset part of the data stream +// name. +// In addition to the criteria for index names, cannot contain - and must be no +// longer than 100 characters. +// Example values are nginx.access and nginx.error. +// +// Supports field references with a mustache-like syntax (denoted as {{double}} +// or {{{triple}}} curly braces). 
+// When resolving field references, the processor replaces invalid characters +// with _. Uses the part +// of the index name as a fallback if all field references resolve to a null, +// missing, or non-string value. +// +// default {{data_stream.dataset}} +func (s *_rerouteProcessor) Dataset(datasets ...string) *_rerouteProcessor { + + s.v.Dataset = make([]string, len(datasets)) + s.v.Dataset = datasets + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_rerouteProcessor) Description(description string) *_rerouteProcessor { + + s.v.Description = &description + + return s +} + +// A static value for the target. Can’t be set when the dataset or namespace +// option is set. +func (s *_rerouteProcessor) Destination(destination string) *_rerouteProcessor { + + s.v.Destination = &destination + + return s +} + +// Conditionally execute the processor. +func (s *_rerouteProcessor) If(if_ types.ScriptVariant) *_rerouteProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_rerouteProcessor) IgnoreFailure(ignorefailure bool) *_rerouteProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Field references or a static value for the namespace part of the data stream +// name. See the criteria for +// index names for allowed characters. Must be no longer than 100 characters. +// +// Supports field references with a mustache-like syntax (denoted as {{double}} +// or {{{triple}}} curly braces). +// When resolving field references, the processor replaces invalid characters +// with _. Uses the part +// of the index name as a fallback if all field references resolve to a null, +// missing, or non-string value. 
+// +// default {{data_stream.namespace}} +func (s *_rerouteProcessor) Namespace(namespaces ...string) *_rerouteProcessor { + + s.v.Namespace = make([]string, len(namespaces)) + s.v.Namespace = namespaces + + return s +} + +// Handle failures for the processor. +func (s *_rerouteProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_rerouteProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_rerouteProcessor) Tag(tag string) *_rerouteProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_rerouteProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Reroute = s.v + + return container +} + +func (s *_rerouteProcessor) RerouteProcessorCaster() *types.RerouteProcessor { + return s.v +} diff --git a/typedapi/esdsl/rescore.go b/typedapi/esdsl/rescore.go new file mode 100644 index 0000000000..985a7fcafd --- /dev/null +++ b/typedapi/esdsl/rescore.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _rescore struct { + v *types.Rescore +} + +func NewRescore() *_rescore { + return &_rescore{v: types.NewRescore()} +} + +// AdditionalRescoreProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_rescore) AdditionalRescoreProperty(key string, value json.RawMessage) *_rescore { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRescoreProperty = tmp + return s +} + +func (s *_rescore) LearningToRank(learningtorank types.LearningToRankVariant) *_rescore { + + s.v.LearningToRank = learningtorank.LearningToRankCaster() + + return s +} + +func (s *_rescore) Query(query types.RescoreQueryVariant) *_rescore { + + s.v.Query = query.RescoreQueryCaster() + + return s +} + +func (s *_rescore) WindowSize(windowsize int) *_rescore { + + s.v.WindowSize = &windowsize + + return s +} + +func (s *_rescore) RescoreCaster() *types.Rescore { + return s.v +} diff --git a/typedapi/esdsl/rescorequery.go b/typedapi/esdsl/rescorequery.go new file mode 100644 index 0000000000..053f501794 --- /dev/null +++ b/typedapi/esdsl/rescorequery.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode" +) + +type _rescoreQuery struct { + v *types.RescoreQuery +} + +func NewRescoreQuery(query types.QueryVariant) *_rescoreQuery { + + tmp := &_rescoreQuery{v: types.NewRescoreQuery()} + + tmp.Query(query) + + return tmp + +} + +// The query to use for rescoring. +// This query is only run on the Top-K results returned by the `query` and +// `post_filter` phases. +func (s *_rescoreQuery) Query(query types.QueryVariant) *_rescoreQuery { + + s.v.Query = *query.QueryCaster() + + return s +} + +// Relative importance of the original query versus the rescore query. +func (s *_rescoreQuery) QueryWeight(queryweight types.Float64) *_rescoreQuery { + + s.v.QueryWeight = &queryweight + + return s +} + +// Relative importance of the rescore query versus the original query. +func (s *_rescoreQuery) RescoreQueryWeight(rescorequeryweight types.Float64) *_rescoreQuery { + + s.v.RescoreQueryWeight = &rescorequeryweight + + return s +} + +// Determines how scores are combined. 
+func (s *_rescoreQuery) ScoreMode(scoremode scoremode.ScoreMode) *_rescoreQuery { + + s.v.ScoreMode = &scoremode + return s +} + +func (s *_rescoreQuery) RescoreCaster() *types.Rescore { + container := types.NewRescore() + + container.Query = s.v + + return container +} + +func (s *_rescoreQuery) RescoreQueryCaster() *types.RescoreQuery { + return s.v +} diff --git a/typedapi/esdsl/rescorevector.go b/typedapi/esdsl/rescorevector.go new file mode 100644 index 0000000000..795c4a4412 --- /dev/null +++ b/typedapi/esdsl/rescorevector.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rescoreVector struct { + v *types.RescoreVector +} + +func NewRescoreVector(oversample float32) *_rescoreVector { + + tmp := &_rescoreVector{v: types.NewRescoreVector()} + + tmp.Oversample(oversample) + + return tmp + +} + +// Applies the specified oversample factor to k on the approximate kNN search +func (s *_rescoreVector) Oversample(oversample float32) *_rescoreVector { + + s.v.Oversample = oversample + + return s +} + +func (s *_rescoreVector) RescoreVectorCaster() *types.RescoreVector { + return s.v +} diff --git a/typedapi/types/synonymsupdateresult.go b/typedapi/esdsl/restriction.go similarity index 54% rename from typedapi/types/synonymsupdateresult.go rename to typedapi/esdsl/restriction.go index 3b68e16dbf..6091812b6c 100644 --- a/typedapi/types/synonymsupdateresult.go +++ b/typedapi/esdsl/restriction.go @@ -16,28 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package types +package esdsl import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/restrictionworkflow" ) -// SynonymsUpdateResult type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/_types/SynonymsUpdateResult.ts#L23-L34 -type SynonymsUpdateResult struct { - // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. 
- // This is the analyzers reloading result - ReloadAnalyzersDetails ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result - Result result.Result `json:"result"` +type _restriction struct { + v *types.Restriction +} + +func NewRestriction() *_restriction { + + return &_restriction{v: types.NewRestriction()} + } -// NewSynonymsUpdateResult returns a SynonymsUpdateResult. -func NewSynonymsUpdateResult() *SynonymsUpdateResult { - r := &SynonymsUpdateResult{} +// A list of workflows to which the API key is restricted. +// NOTE: In order to use a role restriction, an API key must be created with a +// single role descriptor. +func (s *_restriction) Workflows(workflows ...restrictionworkflow.RestrictionWorkflow) *_restriction { + + for _, v := range workflows { + + s.v.Workflows = append(s.v.Workflows, v) + + } + return s +} - return r +func (s *_restriction) RestrictionCaster() *types.Restriction { + return s.v } diff --git a/typedapi/esdsl/retention.go b/typedapi/esdsl/retention.go new file mode 100644 index 0000000000..69e31b05b5 --- /dev/null +++ b/typedapi/esdsl/retention.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _retention struct { + v *types.Retention +} + +func NewRetention(maxcount int, mincount int) *_retention { + + tmp := &_retention{v: types.NewRetention()} + + tmp.MaxCount(maxcount) + + tmp.MinCount(mincount) + + return tmp + +} + +// Time period after which a snapshot is considered expired and eligible for +// deletion. SLM deletes expired snapshots based on the slm.retention_schedule. +func (s *_retention) ExpireAfter(duration types.DurationVariant) *_retention { + + s.v.ExpireAfter = *duration.DurationCaster() + + return s +} + +// Maximum number of snapshots to retain, even if the snapshots have not yet +// expired. If the number of snapshots in the repository exceeds this limit, the +// policy retains the most recent snapshots and deletes older snapshots. +func (s *_retention) MaxCount(maxcount int) *_retention { + + s.v.MaxCount = maxcount + + return s +} + +// Minimum number of snapshots to retain, even if the snapshots have expired. +func (s *_retention) MinCount(mincount int) *_retention { + + s.v.MinCount = mincount + + return s +} + +func (s *_retention) RetentionCaster() *types.Retention { + return s.v +} diff --git a/typedapi/esdsl/retentionlease.go b/typedapi/esdsl/retentionlease.go new file mode 100644 index 0000000000..3aafb67082 --- /dev/null +++ b/typedapi/esdsl/retentionlease.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _retentionLease struct { + v *types.RetentionLease +} + +func NewRetentionLease() *_retentionLease { + + return &_retentionLease{v: types.NewRetentionLease()} + +} + +func (s *_retentionLease) Period(duration types.DurationVariant) *_retentionLease { + + s.v.Period = *duration.DurationCaster() + + return s +} + +func (s *_retentionLease) RetentionLeaseCaster() *types.RetentionLease { + return s.v +} diff --git a/typedapi/esdsl/retentionpolicy.go b/typedapi/esdsl/retentionpolicy.go new file mode 100644 index 0000000000..3089653c1d --- /dev/null +++ b/typedapi/esdsl/retentionpolicy.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _retentionPolicy struct { + v *types.RetentionPolicy +} + +// Specifies that the transform uses a time field to set the retention policy. +func NewRetentionPolicy() *_retentionPolicy { + + return &_retentionPolicy{v: types.NewRetentionPolicy()} + +} + +// The date field that is used to calculate the age of the document. +func (s *_retentionPolicy) Field(field string) *_retentionPolicy { + + s.v.Field = field + + return s +} + +// Specifies the maximum age of a document in the destination index. Documents +// that are older than the configured +// value are removed from the destination index. +func (s *_retentionPolicy) MaxAge(duration types.DurationVariant) *_retentionPolicy { + + s.v.MaxAge = *duration.DurationCaster() + + return s +} + +func (s *_retentionPolicy) RetentionPolicyContainerCaster() *types.RetentionPolicyContainer { + container := types.NewRetentionPolicyContainer() + + container.Time = s.v + + return container +} + +func (s *_retentionPolicy) RetentionPolicyCaster() *types.RetentionPolicy { + return s.v +} diff --git a/typedapi/esdsl/retentionpolicycontainer.go b/typedapi/esdsl/retentionpolicycontainer.go new file mode 100644 index 0000000000..72dd612a19 --- /dev/null +++ b/typedapi/esdsl/retentionpolicycontainer.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _retentionPolicyContainer struct { + v *types.RetentionPolicyContainer +} + +func NewRetentionPolicyContainer() *_retentionPolicyContainer { + return &_retentionPolicyContainer{v: types.NewRetentionPolicyContainer()} +} + +// AdditionalRetentionPolicyContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_retentionPolicyContainer) AdditionalRetentionPolicyContainerProperty(key string, value json.RawMessage) *_retentionPolicyContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRetentionPolicyContainerProperty = tmp + return s +} + +// Specifies that the transform uses a time field to set the retention policy. 
+func (s *_retentionPolicyContainer) Time(time types.RetentionPolicyVariant) *_retentionPolicyContainer { + + s.v.Time = time.RetentionPolicyCaster() + + return s +} + +func (s *_retentionPolicyContainer) RetentionPolicyContainerCaster() *types.RetentionPolicyContainer { + return s.v +} diff --git a/typedapi/esdsl/retrievercontainer.go b/typedapi/esdsl/retrievercontainer.go new file mode 100644 index 0000000000..29ba800565 --- /dev/null +++ b/typedapi/esdsl/retrievercontainer.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _retrieverContainer struct { + v *types.RetrieverContainer +} + +func NewRetrieverContainer() *_retrieverContainer { + return &_retrieverContainer{v: types.NewRetrieverContainer()} +} + +// AdditionalRetrieverContainerProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_retrieverContainer) AdditionalRetrieverContainerProperty(key string, value json.RawMessage) *_retrieverContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRetrieverContainerProperty = tmp + return s +} + +// A retriever that replaces the functionality of a knn search. +func (s *_retrieverContainer) Knn(knn types.KnnRetrieverVariant) *_retrieverContainer { + + s.v.Knn = knn.KnnRetrieverCaster() + + return s +} + +// A retriever that produces top documents from reciprocal rank fusion (RRF). +func (s *_retrieverContainer) Rrf(rrf types.RRFRetrieverVariant) *_retrieverContainer { + + s.v.Rrf = rrf.RRFRetrieverCaster() + + return s +} + +// A retriever that replaces the functionality of a rule query. +func (s *_retrieverContainer) Rule(rule types.RuleRetrieverVariant) *_retrieverContainer { + + s.v.Rule = rule.RuleRetrieverCaster() + + return s +} + +// A retriever that replaces the functionality of a traditional query. +func (s *_retrieverContainer) Standard(standard types.StandardRetrieverVariant) *_retrieverContainer { + + s.v.Standard = standard.StandardRetrieverCaster() + + return s +} + +// A retriever that reranks the top documents based on a reranking model using +// the InferenceAPI +func (s *_retrieverContainer) TextSimilarityReranker(textsimilarityreranker types.TextSimilarityRerankerVariant) *_retrieverContainer { + + s.v.TextSimilarityReranker = textsimilarityreranker.TextSimilarityRerankerCaster() + + return s +} + +func (s *_retrieverContainer) RetrieverContainerCaster() *types.RetrieverContainer { + return s.v +} diff --git a/typedapi/esdsl/reversenestedaggregation.go b/typedapi/esdsl/reversenestedaggregation.go new file mode 100644 index 0000000000..67fad9e9d8 --- /dev/null +++ b/typedapi/esdsl/reversenestedaggregation.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _reverseNestedAggregation struct { + v *types.ReverseNestedAggregation +} + +// A special single bucket aggregation that enables aggregating on parent +// documents from nested documents. +// Should only be defined inside a `nested` aggregation. +func NewReverseNestedAggregation() *_reverseNestedAggregation { + + return &_reverseNestedAggregation{v: types.NewReverseNestedAggregation()} + +} + +// Defines the nested object field that should be joined back to. +// The default is empty, which means that it joins back to the root/main +// document level. 
+func (s *_reverseNestedAggregation) Path(field string) *_reverseNestedAggregation { + + s.v.Path = &field + + return s +} + +func (s *_reverseNestedAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.ReverseNested = s.v + + return container +} + +func (s *_reverseNestedAggregation) ReverseNestedAggregationCaster() *types.ReverseNestedAggregation { + return s.v +} diff --git a/typedapi/esdsl/reversetokenfilter.go b/typedapi/esdsl/reversetokenfilter.go new file mode 100644 index 0000000000..7ab9e3dcd5 --- /dev/null +++ b/typedapi/esdsl/reversetokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _reverseTokenFilter struct { + v *types.ReverseTokenFilter +} + +func NewReverseTokenFilter() *_reverseTokenFilter { + + return &_reverseTokenFilter{v: types.NewReverseTokenFilter()} + +} + +func (s *_reverseTokenFilter) Version(versionstring string) *_reverseTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_reverseTokenFilter) ReverseTokenFilterCaster() *types.ReverseTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/roledescriptor.go b/typedapi/esdsl/roledescriptor.go new file mode 100644 index 0000000000..9cb1461329 --- /dev/null +++ b/typedapi/esdsl/roledescriptor.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" +) + +type _roleDescriptor struct { + v *types.RoleDescriptor +} + +func NewRoleDescriptor() *_roleDescriptor { + + return &_roleDescriptor{v: types.NewRoleDescriptor()} + +} + +// A list of application privilege entries +func (s *_roleDescriptor) Applications(applications ...types.ApplicationPrivilegesVariant) *_roleDescriptor { + + for _, v := range applications { + + s.v.Applications = append(s.v.Applications, *v.ApplicationPrivilegesCaster()) + + } + return s +} + +// A list of cluster privileges. These privileges define the cluster level +// actions that API keys are able to execute. +func (s *_roleDescriptor) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *_roleDescriptor { + + for _, v := range clusters { + + s.v.Cluster = append(s.v.Cluster, v) + + } + return s +} + +// Optional description of the role descriptor +func (s *_roleDescriptor) Description(description string) *_roleDescriptor { + + s.v.Description = &description + + return s +} + +// An object defining global privileges. A global privilege is a form of cluster +// privilege that is request-aware. Support for global privileges is currently +// limited to the management of application privileges. +func (s *_roleDescriptor) Global(globals ...types.GlobalPrivilegeVariant) *_roleDescriptor { + + s.v.Global = make([]types.GlobalPrivilege, len(globals)) + for i, v := range globals { + s.v.Global[i] = *v.GlobalPrivilegeCaster() + } + + return s +} + +// A list of indices permissions entries. 
+func (s *_roleDescriptor) Indices(indices ...types.IndicesPrivilegesVariant) *_roleDescriptor { + + for _, v := range indices { + + s.v.Indices = append(s.v.Indices, *v.IndicesPrivilegesCaster()) + + } + return s +} + +// Optional meta-data. Within the metadata object, keys that begin with `_` are +// reserved for system usage. +func (s *_roleDescriptor) Metadata(metadata types.MetadataVariant) *_roleDescriptor { + + s.v.Metadata = *metadata.MetadataCaster() + + return s +} + +// A list of cluster permissions for remote clusters. +// NOTE: This is limited a subset of the cluster permissions. +func (s *_roleDescriptor) RemoteCluster(remoteclusters ...types.RemoteClusterPrivilegesVariant) *_roleDescriptor { + + for _, v := range remoteclusters { + + s.v.RemoteCluster = append(s.v.RemoteCluster, *v.RemoteClusterPrivilegesCaster()) + + } + return s +} + +// A list of indices permissions for remote clusters. +func (s *_roleDescriptor) RemoteIndices(remoteindices ...types.RemoteIndicesPrivilegesVariant) *_roleDescriptor { + + for _, v := range remoteindices { + + s.v.RemoteIndices = append(s.v.RemoteIndices, *v.RemoteIndicesPrivilegesCaster()) + + } + return s +} + +// Restriction for when the role descriptor is allowed to be effective. +func (s *_roleDescriptor) Restriction(restriction types.RestrictionVariant) *_roleDescriptor { + + s.v.Restriction = restriction.RestrictionCaster() + + return s +} + +// A list of users that the API keys can impersonate. +// NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. +// For API compatibility, you can still specify an empty `run_as` field, but a +// non-empty list will be rejected. 
+func (s *_roleDescriptor) RunAs(runas ...string) *_roleDescriptor { + + for _, v := range runas { + + s.v.RunAs = append(s.v.RunAs, v) + + } + return s +} + +func (s *_roleDescriptor) TransientMetadata(transientmetadata map[string]json.RawMessage) *_roleDescriptor { + + s.v.TransientMetadata = transientmetadata + return s +} + +func (s *_roleDescriptor) AddTransientMetadatum(key string, value json.RawMessage) *_roleDescriptor { + + var tmp map[string]json.RawMessage + if s.v.TransientMetadata == nil { + s.v.TransientMetadata = make(map[string]json.RawMessage) + } else { + tmp = s.v.TransientMetadata + } + + tmp[key] = value + + s.v.TransientMetadata = tmp + return s +} + +func (s *_roleDescriptor) RoleDescriptorCaster() *types.RoleDescriptor { + return s.v +} diff --git a/typedapi/esdsl/rolemappingrule.go b/typedapi/esdsl/rolemappingrule.go new file mode 100644 index 0000000000..117a7bf5ad --- /dev/null +++ b/typedapi/esdsl/rolemappingrule.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _roleMappingRule struct { + v *types.RoleMappingRule +} + +func NewRoleMappingRule() *_roleMappingRule { + return &_roleMappingRule{v: types.NewRoleMappingRule()} +} + +// AdditionalRoleMappingRuleProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleMappingRule) AdditionalRoleMappingRuleProperty(key string, value json.RawMessage) *_roleMappingRule { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRoleMappingRuleProperty = tmp + return s +} + +func (s *_roleMappingRule) All(alls ...types.RoleMappingRuleVariant) *_roleMappingRule { + + for _, v := range alls { + + s.v.All = append(s.v.All, *v.RoleMappingRuleCaster()) + + } + return s +} + +func (s *_roleMappingRule) Any(anies ...types.RoleMappingRuleVariant) *_roleMappingRule { + + for _, v := range anies { + + s.v.Any = append(s.v.Any, *v.RoleMappingRuleCaster()) + + } + return s +} + +func (s *_roleMappingRule) Except(except types.RoleMappingRuleVariant) *_roleMappingRule { + + s.v.Except = except.RoleMappingRuleCaster() + + return s +} + +// + +func (s *_roleMappingRule) RoleMappingRuleCaster() *types.RoleMappingRule { + return s.v +} diff --git a/typedapi/esdsl/rolequerycontainer.go b/typedapi/esdsl/rolequerycontainer.go new file mode 100644 index 0000000000..ff14c3ea0a --- /dev/null +++ b/typedapi/esdsl/rolequerycontainer.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _roleQueryContainer struct { + v *types.RoleQueryContainer +} + +func NewRoleQueryContainer() *_roleQueryContainer { + return &_roleQueryContainer{v: types.NewRoleQueryContainer()} +} + +// AdditionalRoleQueryContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) AdditionalRoleQueryContainerProperty(key string, value json.RawMessage) *_roleQueryContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalRoleQueryContainerProperty = tmp + return s +} + +// matches roles matching boolean combinations of other queries. +func (s *_roleQueryContainer) Bool(bool types.BoolQueryVariant) *_roleQueryContainer { + + s.v.Bool = bool.BoolQueryCaster() + + return s +} + +// Returns roles that contain an indexed value for a field. +func (s *_roleQueryContainer) Exists(exists types.ExistsQueryVariant) *_roleQueryContainer { + + s.v.Exists = exists.ExistsQueryCaster() + + return s +} + +// Returns roles based on their IDs. +// This query uses role document IDs stored in the `_id` field. 
+func (s *_roleQueryContainer) Ids(ids types.IdsQueryVariant) *_roleQueryContainer { + + s.v.Ids = ids.IdsQueryCaster() + + return s +} + +// Returns roles that match a provided text, number, date or boolean value. +// The provided text is analyzed before matching. +// Match is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) Match(key string, value types.MatchQueryVariant) *_roleQueryContainer { + + tmp := make(map[string]types.MatchQuery) + + tmp[key] = *value.MatchQueryCaster() + + s.v.Match = tmp + return s +} + +// Matches all roles, giving them all a `_score` of 1.0. +func (s *_roleQueryContainer) MatchAll(matchall types.MatchAllQueryVariant) *_roleQueryContainer { + + s.v.MatchAll = matchall.MatchAllQueryCaster() + + return s +} + +// Returns roles that contain a specific prefix in a provided field. +// Prefix is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) Prefix(key string, value types.PrefixQueryVariant) *_roleQueryContainer { + + tmp := make(map[string]types.PrefixQuery) + + tmp[key] = *value.PrefixQueryCaster() + + s.v.Prefix = tmp + return s +} + +// Returns roles that contain terms within a provided range. +// Range is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) Range(key string, value types.RangeQueryVariant) *_roleQueryContainer { + + tmp := make(map[string]types.RangeQuery) + + tmp[key] = *value.RangeQueryCaster() + + s.v.Range = tmp + return s +} + +// Returns roles based on a provided query string, using a parser with a limited +// but fault-tolerant syntax. +func (s *_roleQueryContainer) SimpleQueryString(simplequerystring types.SimpleQueryStringQueryVariant) *_roleQueryContainer { + + s.v.SimpleQueryString = simplequerystring.SimpleQueryStringQueryCaster() + + return s +} + +// Returns roles that contain an exact term in a provided field. 
+// To return a document, the query term must exactly match the queried field's +// value, including whitespace and capitalization. +// Term is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) Term(key string, value types.TermQueryVariant) *_roleQueryContainer { + + tmp := make(map[string]types.TermQuery) + + tmp[key] = *value.TermQueryCaster() + + s.v.Term = tmp + return s +} + +// Returns roles that contain one or more exact terms in a provided field. +// To return a document, one or more terms must exactly match a field value, +// including whitespace and capitalization. +func (s *_roleQueryContainer) Terms(terms types.TermsQueryVariant) *_roleQueryContainer { + + s.v.Terms = terms.TermsQueryCaster() + + return s +} + +// Returns roles that contain terms matching a wildcard pattern. +// Wildcard is a single key dictionnary. +// It will replace the current value on each call. +func (s *_roleQueryContainer) Wildcard(key string, value types.WildcardQueryVariant) *_roleQueryContainer { + + tmp := make(map[string]types.WildcardQuery) + + tmp[key] = *value.WildcardQueryCaster() + + s.v.Wildcard = tmp + return s +} + +func (s *_roleQueryContainer) RoleQueryContainerCaster() *types.RoleQueryContainer { + return s.v +} diff --git a/typedapi/esdsl/roletemplate.go b/typedapi/esdsl/roletemplate.go new file mode 100644 index 0000000000..0c30ed18a3 --- /dev/null +++ b/typedapi/esdsl/roletemplate.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat" +) + +type _roleTemplate struct { + v *types.RoleTemplate +} + +func NewRoleTemplate(template types.ScriptVariant) *_roleTemplate { + + tmp := &_roleTemplate{v: types.NewRoleTemplate()} + + tmp.Template(template) + + return tmp + +} + +func (s *_roleTemplate) Format(format templateformat.TemplateFormat) *_roleTemplate { + + s.v.Format = &format + return s +} + +func (s *_roleTemplate) Template(template types.ScriptVariant) *_roleTemplate { + + s.v.Template = *template.ScriptCaster() + + return s +} + +func (s *_roleTemplate) RoleTemplateCaster() *types.RoleTemplate { + return s.v +} diff --git a/typedapi/esdsl/roletemplateinlinequery.go b/typedapi/esdsl/roletemplateinlinequery.go new file mode 100644 index 0000000000..6cc3c3dd3e --- /dev/null +++ b/typedapi/esdsl/roletemplateinlinequery.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _roleTemplateInlineQuery struct { + v types.RoleTemplateInlineQuery +} + +func NewRoleTemplateInlineQuery() *_roleTemplateInlineQuery { + return &_roleTemplateInlineQuery{v: nil} +} + +func (u *_roleTemplateInlineQuery) String(string string) *_roleTemplateInlineQuery { + + u.v = &string + + return u +} + +func (u *_roleTemplateInlineQuery) Query(query types.QueryVariant) *_roleTemplateInlineQuery { + + u.v = &query + + return u +} + +// Interface implementation for Query in RoleTemplateInlineQuery union +func (u *_query) RoleTemplateInlineQueryCaster() *types.RoleTemplateInlineQuery { + t := types.RoleTemplateInlineQuery(u.v) + return &t +} + +func (u *_roleTemplateInlineQuery) RoleTemplateInlineQueryCaster() *types.RoleTemplateInlineQuery { + return &u.v +} diff --git a/typedapi/esdsl/roletemplatequery.go b/typedapi/esdsl/roletemplatequery.go new file mode 100644 index 0000000000..994f18e12b --- /dev/null +++ b/typedapi/esdsl/roletemplatequery.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _roleTemplateQuery struct { + v *types.RoleTemplateQuery +} + +func NewRoleTemplateQuery() *_roleTemplateQuery { + + return &_roleTemplateQuery{v: types.NewRoleTemplateQuery()} + +} + +// When you create a role, you can specify a query that defines the document +// level security permissions. You can optionally +// use Mustache templates in the role query to insert the username of the +// current authenticated user into the role. +// Like other places in Elasticsearch that support templating or scripting, you +// can specify inline, stored, or file-based +// templates and define custom parameters. You access the details for the +// current authenticated user through the _user parameter. 
+func (s *_roleTemplateQuery) Template(template types.RoleTemplateScriptVariant) *_roleTemplateQuery { + + s.v.Template = template.RoleTemplateScriptCaster() + + return s +} + +func (s *_roleTemplateQuery) RoleTemplateQueryCaster() *types.RoleTemplateQuery { + return s.v +} diff --git a/typedapi/esdsl/roletemplatescript.go b/typedapi/esdsl/roletemplatescript.go new file mode 100644 index 0000000000..44311715c9 --- /dev/null +++ b/typedapi/esdsl/roletemplatescript.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" +) + +type _roleTemplateScript struct { + v *types.RoleTemplateScript +} + +func NewRoleTemplateScript() *_roleTemplateScript { + + return &_roleTemplateScript{v: types.NewRoleTemplateScript()} + +} + +// The `id` for a stored script. 
+func (s *_roleTemplateScript) Id(id string) *_roleTemplateScript { + + s.v.Id = &id + + return s +} + +// Specifies the language the script is written in. +func (s *_roleTemplateScript) Lang(lang scriptlanguage.ScriptLanguage) *_roleTemplateScript { + + s.v.Lang = &lang + return s +} + +func (s *_roleTemplateScript) Options(options map[string]string) *_roleTemplateScript { + + s.v.Options = options + return s +} + +func (s *_roleTemplateScript) AddOption(key string, value string) *_roleTemplateScript { + + var tmp map[string]string + if s.v.Options == nil { + s.v.Options = make(map[string]string) + } else { + tmp = s.v.Options + } + + tmp[key] = value + + s.v.Options = tmp + return s +} + +// Specifies any named parameters that are passed into the script as variables. +// Use parameters instead of hard-coded values to decrease compile time. +func (s *_roleTemplateScript) Params(params map[string]json.RawMessage) *_roleTemplateScript { + + s.v.Params = params + return s +} + +func (s *_roleTemplateScript) AddParam(key string, value json.RawMessage) *_roleTemplateScript { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_roleTemplateScript) Source(roletemplateinlinequery types.RoleTemplateInlineQueryVariant) *_roleTemplateScript { + + s.v.Source = *roletemplateinlinequery.RoleTemplateInlineQueryCaster() + + return s +} + +func (s *_roleTemplateScript) RoleTemplateScriptCaster() *types.RoleTemplateScript { + return s.v +} diff --git a/typedapi/esdsl/rolloveraction.go b/typedapi/esdsl/rolloveraction.go new file mode 100644 index 0000000000..a9009dbcbf --- /dev/null +++ b/typedapi/esdsl/rolloveraction.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rolloverAction struct { + v *types.RolloverAction +} + +func NewRolloverAction() *_rolloverAction { + + return &_rolloverAction{v: types.NewRolloverAction()} + +} + +func (s *_rolloverAction) MaxAge(duration types.DurationVariant) *_rolloverAction { + + s.v.MaxAge = *duration.DurationCaster() + + return s +} + +func (s *_rolloverAction) MaxDocs(maxdocs int64) *_rolloverAction { + + s.v.MaxDocs = &maxdocs + + return s +} + +func (s *_rolloverAction) MaxPrimaryShardDocs(maxprimarysharddocs int64) *_rolloverAction { + + s.v.MaxPrimaryShardDocs = &maxprimarysharddocs + + return s +} + +func (s *_rolloverAction) MaxPrimaryShardSize(bytesize types.ByteSizeVariant) *_rolloverAction { + + s.v.MaxPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverAction) MaxSize(bytesize types.ByteSizeVariant) *_rolloverAction { + + s.v.MaxSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverAction) MinAge(duration types.DurationVariant) *_rolloverAction { + + s.v.MinAge = 
*duration.DurationCaster() + + return s +} + +func (s *_rolloverAction) MinDocs(mindocs int64) *_rolloverAction { + + s.v.MinDocs = &mindocs + + return s +} + +func (s *_rolloverAction) MinPrimaryShardDocs(minprimarysharddocs int64) *_rolloverAction { + + s.v.MinPrimaryShardDocs = &minprimarysharddocs + + return s +} + +func (s *_rolloverAction) MinPrimaryShardSize(bytesize types.ByteSizeVariant) *_rolloverAction { + + s.v.MinPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverAction) MinSize(bytesize types.ByteSizeVariant) *_rolloverAction { + + s.v.MinSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverAction) RolloverActionCaster() *types.RolloverAction { + return s.v +} diff --git a/typedapi/esdsl/rolloverconditions.go b/typedapi/esdsl/rolloverconditions.go new file mode 100644 index 0000000000..d6ecc2ae7a --- /dev/null +++ b/typedapi/esdsl/rolloverconditions.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rolloverConditions struct { + v *types.RolloverConditions +} + +func NewRolloverConditions() *_rolloverConditions { + + return &_rolloverConditions{v: types.NewRolloverConditions()} + +} + +func (s *_rolloverConditions) MaxAge(duration types.DurationVariant) *_rolloverConditions { + + s.v.MaxAge = *duration.DurationCaster() + + return s +} + +func (s *_rolloverConditions) MaxAgeMillis(durationvalueunitmillis int64) *_rolloverConditions { + + s.v.MaxAgeMillis = &durationvalueunitmillis + + return s +} + +func (s *_rolloverConditions) MaxDocs(maxdocs int64) *_rolloverConditions { + + s.v.MaxDocs = &maxdocs + + return s +} + +func (s *_rolloverConditions) MaxPrimaryShardDocs(maxprimarysharddocs int64) *_rolloverConditions { + + s.v.MaxPrimaryShardDocs = &maxprimarysharddocs + + return s +} + +func (s *_rolloverConditions) MaxPrimaryShardSize(bytesize types.ByteSizeVariant) *_rolloverConditions { + + s.v.MaxPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverConditions) MaxPrimaryShardSizeBytes(maxprimaryshardsizebytes int64) *_rolloverConditions { + + s.v.MaxPrimaryShardSizeBytes = &maxprimaryshardsizebytes + + return s +} + +func (s *_rolloverConditions) MaxSize(bytesize types.ByteSizeVariant) *_rolloverConditions { + + s.v.MaxSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverConditions) MaxSizeBytes(maxsizebytes int64) *_rolloverConditions { + + s.v.MaxSizeBytes = &maxsizebytes + + return s +} + +func (s *_rolloverConditions) MinAge(duration types.DurationVariant) *_rolloverConditions { + + s.v.MinAge = *duration.DurationCaster() + + return s +} + +func (s *_rolloverConditions) MinDocs(mindocs int64) *_rolloverConditions { + + s.v.MinDocs = &mindocs + + return s +} + +func (s *_rolloverConditions) 
MinPrimaryShardDocs(minprimarysharddocs int64) *_rolloverConditions { + + s.v.MinPrimaryShardDocs = &minprimarysharddocs + + return s +} + +func (s *_rolloverConditions) MinPrimaryShardSize(bytesize types.ByteSizeVariant) *_rolloverConditions { + + s.v.MinPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverConditions) MinPrimaryShardSizeBytes(minprimaryshardsizebytes int64) *_rolloverConditions { + + s.v.MinPrimaryShardSizeBytes = &minprimaryshardsizebytes + + return s +} + +func (s *_rolloverConditions) MinSize(bytesize types.ByteSizeVariant) *_rolloverConditions { + + s.v.MinSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_rolloverConditions) MinSizeBytes(minsizebytes int64) *_rolloverConditions { + + s.v.MinSizeBytes = &minsizebytes + + return s +} + +func (s *_rolloverConditions) RolloverConditionsCaster() *types.RolloverConditions { + return s.v +} diff --git a/typedapi/esdsl/romaniananalyzer.go b/typedapi/esdsl/romaniananalyzer.go new file mode 100644 index 0000000000..fa6d486285 --- /dev/null +++ b/typedapi/esdsl/romaniananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _romanianAnalyzer struct { + v *types.RomanianAnalyzer +} + +func NewRomanianAnalyzer() *_romanianAnalyzer { + + return &_romanianAnalyzer{v: types.NewRomanianAnalyzer()} + +} + +func (s *_romanianAnalyzer) StemExclusion(stemexclusions ...string) *_romanianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_romanianAnalyzer) Stopwords(stopwords ...string) *_romanianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_romanianAnalyzer) StopwordsPath(stopwordspath string) *_romanianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_romanianAnalyzer) RomanianAnalyzerCaster() *types.RomanianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/routingfield.go b/typedapi/esdsl/routingfield.go new file mode 100644 index 0000000000..daf2b89183 --- /dev/null +++ b/typedapi/esdsl/routingfield.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _routingField struct { + v *types.RoutingField +} + +func NewRoutingField(required bool) *_routingField { + + tmp := &_routingField{v: types.NewRoutingField()} + + tmp.Required(required) + + return tmp + +} + +func (s *_routingField) Required(required bool) *_routingField { + + s.v.Required = required + + return s +} + +func (s *_routingField) RoutingFieldCaster() *types.RoutingField { + return s.v +} diff --git a/typedapi/esdsl/rrfrank.go b/typedapi/esdsl/rrfrank.go new file mode 100644 index 0000000000..eedf4274d5 --- /dev/null +++ b/typedapi/esdsl/rrfrank.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rrfRank struct { + v *types.RrfRank +} + +// The reciprocal rank fusion parameters +func NewRrfRank() *_rrfRank { + + return &_rrfRank{v: types.NewRrfRank()} + +} + +// How much influence documents in individual result sets per query have over +// the final ranked result set +func (s *_rrfRank) RankConstant(rankconstant int64) *_rrfRank { + + s.v.RankConstant = &rankconstant + + return s +} + +// Size of the individual result sets per query +func (s *_rrfRank) RankWindowSize(rankwindowsize int64) *_rrfRank { + + s.v.RankWindowSize = &rankwindowsize + + return s +} + +func (s *_rrfRank) RankContainerCaster() *types.RankContainer { + container := types.NewRankContainer() + + container.Rrf = s.v + + return container +} + +func (s *_rrfRank) RrfRankCaster() *types.RrfRank { + return s.v +} diff --git a/typedapi/esdsl/rrfretriever.go b/typedapi/esdsl/rrfretriever.go new file mode 100644 index 0000000000..0fb9d7dec4 --- /dev/null +++ b/typedapi/esdsl/rrfretriever.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _rRFRetriever struct { + v *types.RRFRetriever +} + +// A retriever that produces top documents from reciprocal rank fusion (RRF). +func NewRRFRetriever() *_rRFRetriever { + + return &_rRFRetriever{v: types.NewRRFRetriever()} + +} + +// Query to filter the documents that can match. +func (s *_rRFRetriever) Filter(filters ...types.QueryVariant) *_rRFRetriever { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are not +// included in the top documents. +func (s *_rRFRetriever) MinScore(minscore float32) *_rRFRetriever { + + s.v.MinScore = &minscore + + return s +} + +// This value determines how much influence documents in individual result sets +// per query have over the final ranked result set. +func (s *_rRFRetriever) RankConstant(rankconstant int) *_rRFRetriever { + + s.v.RankConstant = &rankconstant + + return s +} + +// This value determines the size of the individual result sets per query. +func (s *_rRFRetriever) RankWindowSize(rankwindowsize int) *_rRFRetriever { + + s.v.RankWindowSize = &rankwindowsize + + return s +} + +// A list of child retrievers to specify which sets of returned top documents +// will have the RRF formula applied to them. 
+func (s *_rRFRetriever) Retrievers(retrievers ...types.RetrieverContainerVariant) *_rRFRetriever { + + for _, v := range retrievers { + + s.v.Retrievers = append(s.v.Retrievers, *v.RetrieverContainerCaster()) + + } + return s +} + +func (s *_rRFRetriever) RetrieverContainerCaster() *types.RetrieverContainer { + container := types.NewRetrieverContainer() + + container.Rrf = s.v + + return container +} + +func (s *_rRFRetriever) RRFRetrieverCaster() *types.RRFRetriever { + return s.v +} diff --git a/typedapi/esdsl/rulecondition.go b/typedapi/esdsl/rulecondition.go new file mode 100644 index 0000000000..fbf32c951a --- /dev/null +++ b/typedapi/esdsl/rulecondition.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator" +) + +type _ruleCondition struct { + v *types.RuleCondition +} + +func NewRuleCondition(appliesto appliesto.AppliesTo, operator conditionoperator.ConditionOperator, value types.Float64) *_ruleCondition { + + tmp := &_ruleCondition{v: types.NewRuleCondition()} + + tmp.AppliesTo(appliesto) + + tmp.Operator(operator) + + tmp.Value(value) + + return tmp + +} + +// Specifies the result property to which the condition applies. If your +// detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can +// only specify conditions that apply to time. +func (s *_ruleCondition) AppliesTo(appliesto appliesto.AppliesTo) *_ruleCondition { + + s.v.AppliesTo = appliesto + return s +} + +// Specifies the condition operator. The available options are greater than, +// greater than or equals, less than, and less than or equals. +func (s *_ruleCondition) Operator(operator conditionoperator.ConditionOperator) *_ruleCondition { + + s.v.Operator = operator + return s +} + +// The value that is compared against the `applies_to` field using the operator. +func (s *_ruleCondition) Value(value types.Float64) *_ruleCondition { + + s.v.Value = value + + return s +} + +func (s *_ruleCondition) RuleConditionCaster() *types.RuleCondition { + return s.v +} diff --git a/typedapi/esdsl/rulequery.go b/typedapi/esdsl/rulequery.go new file mode 100644 index 0000000000..b4609914ad --- /dev/null +++ b/typedapi/esdsl/rulequery.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _ruleQuery struct { + v *types.RuleQuery +} + +func NewRuleQuery(matchcriteria json.RawMessage, organic types.QueryVariant) *_ruleQuery { + + tmp := &_ruleQuery{v: types.NewRuleQuery()} + + tmp.MatchCriteria(matchcriteria) + + tmp.Organic(organic) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_ruleQuery) Boost(boost float32) *_ruleQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_ruleQuery) MatchCriteria(matchcriteria json.RawMessage) *_ruleQuery { + + s.v.MatchCriteria = matchcriteria + + return s +} + +func (s *_ruleQuery) Organic(organic types.QueryVariant) *_ruleQuery { + + s.v.Organic = *organic.QueryCaster() + + return s +} + +func (s *_ruleQuery) QueryName_(queryname_ string) *_ruleQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_ruleQuery) RulesetIds(rulesetids ...string) *_ruleQuery { + + for _, v := range rulesetids { + + s.v.RulesetIds = append(s.v.RulesetIds, v) + + } + return s +} + +func (s *_ruleQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Rule = s.v + + return container +} + +func (s *_ruleQuery) RuleQueryCaster() *types.RuleQuery { + return s.v +} diff --git a/typedapi/esdsl/ruleretriever.go b/typedapi/esdsl/ruleretriever.go new file mode 100644 index 0000000000..11ed37dc15 --- /dev/null +++ b/typedapi/esdsl/ruleretriever.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _ruleRetriever struct { + v *types.RuleRetriever +} + +// A retriever that replaces the functionality of a rule query. +func NewRuleRetriever(matchcriteria json.RawMessage, retriever types.RetrieverContainerVariant) *_ruleRetriever { + + tmp := &_ruleRetriever{v: types.NewRuleRetriever()} + + tmp.MatchCriteria(matchcriteria) + + tmp.Retriever(retriever) + + return tmp + +} + +// Query to filter the documents that can match. +func (s *_ruleRetriever) Filter(filters ...types.QueryVariant) *_ruleRetriever { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// The match criteria that will determine if a rule in the provided rulesets +// should be applied. +func (s *_ruleRetriever) MatchCriteria(matchcriteria json.RawMessage) *_ruleRetriever { + + s.v.MatchCriteria = matchcriteria + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are not +// included in the top documents. +func (s *_ruleRetriever) MinScore(minscore float32) *_ruleRetriever { + + s.v.MinScore = &minscore + + return s +} + +// This value determines the size of the individual result set. +func (s *_ruleRetriever) RankWindowSize(rankwindowsize int) *_ruleRetriever { + + s.v.RankWindowSize = &rankwindowsize + + return s +} + +// The retriever whose results rules should be applied to. +func (s *_ruleRetriever) Retriever(retriever types.RetrieverContainerVariant) *_ruleRetriever { + + s.v.Retriever = *retriever.RetrieverContainerCaster() + + return s +} + +// The ruleset IDs containing the rules this retriever is evaluating against. 
+func (s *_ruleRetriever) RulesetIds(rulesetids ...string) *_ruleRetriever { + + for _, v := range rulesetids { + + s.v.RulesetIds = append(s.v.RulesetIds, v) + + } + return s +} + +func (s *_ruleRetriever) RetrieverContainerCaster() *types.RetrieverContainer { + container := types.NewRetrieverContainer() + + container.Rule = s.v + + return container +} + +func (s *_ruleRetriever) RuleRetrieverCaster() *types.RuleRetriever { + return s.v +} diff --git a/typedapi/esdsl/runtimefield.go b/typedapi/esdsl/runtimefield.go new file mode 100644 index 0000000000..2886175eda --- /dev/null +++ b/typedapi/esdsl/runtimefield.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype" +) + +type _runtimeField struct { + v *types.RuntimeField +} + +func NewRuntimeField(type_ runtimefieldtype.RuntimeFieldType) *_runtimeField { + + tmp := &_runtimeField{v: types.NewRuntimeField()} + + tmp.Type(type_) + + return tmp + +} + +// For type `lookup` +func (s *_runtimeField) FetchFields(fetchfields ...types.RuntimeFieldFetchFieldsVariant) *_runtimeField { + + for _, v := range fetchfields { + + s.v.FetchFields = append(s.v.FetchFields, *v.RuntimeFieldFetchFieldsCaster()) + + } + return s +} + +// For type `composite` +func (s *_runtimeField) Fields(fields map[string]types.CompositeSubField) *_runtimeField { + + s.v.Fields = fields + return s +} + +func (s *_runtimeField) AddField(key string, value types.CompositeSubFieldVariant) *_runtimeField { + + var tmp map[string]types.CompositeSubField + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.CompositeSubField) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.CompositeSubFieldCaster() + + s.v.Fields = tmp + return s +} + +// A custom format for `date` type runtime fields. +func (s *_runtimeField) Format(format string) *_runtimeField { + + s.v.Format = &format + + return s +} + +// For type `lookup` +func (s *_runtimeField) InputField(field string) *_runtimeField { + + s.v.InputField = &field + + return s +} + +// Painless script executed at query time. 
+func (s *_runtimeField) Script(script types.ScriptVariant) *_runtimeField { + + s.v.Script = script.ScriptCaster() + + return s +} + +// For type `lookup` +func (s *_runtimeField) TargetField(field string) *_runtimeField { + + s.v.TargetField = &field + + return s +} + +// For type `lookup` +func (s *_runtimeField) TargetIndex(indexname string) *_runtimeField { + + s.v.TargetIndex = &indexname + + return s +} + +// Field type, which can be: `boolean`, `composite`, `date`, `double`, +// `geo_point`, `ip`,`keyword`, `long`, or `lookup`. +func (s *_runtimeField) Type(type_ runtimefieldtype.RuntimeFieldType) *_runtimeField { + + s.v.Type = type_ + return s +} + +func (s *_runtimeField) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Runtime = s.v + + return container +} + +func (s *_runtimeField) RuntimeFieldCaster() *types.RuntimeField { + return s.v +} diff --git a/typedapi/esdsl/runtimefieldfetchfields.go b/typedapi/esdsl/runtimefieldfetchfields.go new file mode 100644 index 0000000000..4a7cf812e6 --- /dev/null +++ b/typedapi/esdsl/runtimefieldfetchfields.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _runtimeFieldFetchFields struct { + v *types.RuntimeFieldFetchFields +} + +func NewRuntimeFieldFetchFields() *_runtimeFieldFetchFields { + + return &_runtimeFieldFetchFields{v: types.NewRuntimeFieldFetchFields()} + +} + +func (s *_runtimeFieldFetchFields) Field(field string) *_runtimeFieldFetchFields { + + s.v.Field = field + + return s +} + +func (s *_runtimeFieldFetchFields) Format(format string) *_runtimeFieldFetchFields { + + s.v.Format = &format + + return s +} + +func (s *_runtimeFieldFetchFields) RuntimeFieldFetchFieldsCaster() *types.RuntimeFieldFetchFields { + return s.v +} diff --git a/typedapi/esdsl/runtimefields.go b/typedapi/esdsl/runtimefields.go new file mode 100644 index 0000000000..8650cd2213 --- /dev/null +++ b/typedapi/esdsl/runtimefields.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias. +type _runtimeFields struct { + v types.RuntimeFields +} + +func NewRuntimeFields(runtimefields types.RuntimeFieldVariant) *_runtimeFields { + return &_runtimeFields{v: make(map[string]types.RuntimeField, 0)} +} + +func (u *_runtimeFields) RuntimeFieldsCaster() *types.RuntimeFields { + return &u.v +} diff --git a/typedapi/esdsl/russiananalyzer.go b/typedapi/esdsl/russiananalyzer.go new file mode 100644 index 0000000000..fb35361dbd --- /dev/null +++ b/typedapi/esdsl/russiananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _russianAnalyzer struct { + v *types.RussianAnalyzer +} + +func NewRussianAnalyzer() *_russianAnalyzer { + + return &_russianAnalyzer{v: types.NewRussianAnalyzer()} + +} + +func (s *_russianAnalyzer) StemExclusion(stemexclusions ...string) *_russianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_russianAnalyzer) Stopwords(stopwords ...string) *_russianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_russianAnalyzer) StopwordsPath(stopwordspath string) *_russianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_russianAnalyzer) RussianAnalyzerCaster() *types.RussianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/s3repository.go b/typedapi/esdsl/s3repository.go new file mode 100644 index 0000000000..6c90800710 --- /dev/null +++ b/typedapi/esdsl/s3repository.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _s3Repository struct { + v *types.S3Repository +} + +func NewS3Repository(settings types.S3RepositorySettingsVariant) *_s3Repository { + + tmp := &_s3Repository{v: types.NewS3Repository()} + + tmp.Settings(settings) + + return tmp + +} + +// The repository settings. +// +// NOTE: In addition to the specified settings, you can also use all non-secure +// client settings in the repository settings. +// In this case, the client settings found in the repository settings will be +// merged with those of the named client used by the repository. +// Conflicts between client and repository settings are resolved by the +// repository settings taking precedence over client settings. +func (s *_s3Repository) Settings(settings types.S3RepositorySettingsVariant) *_s3Repository { + + s.v.Settings = *settings.S3RepositorySettingsCaster() + + return s +} + +func (s *_s3Repository) Uuid(uuid string) *_s3Repository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_s3Repository) S3RepositoryCaster() *types.S3Repository { + return s.v +} diff --git a/typedapi/esdsl/s3repositorysettings.go b/typedapi/esdsl/s3repositorysettings.go new file mode 100644 index 0000000000..1639b2c0f7 --- /dev/null +++ b/typedapi/esdsl/s3repositorysettings.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _s3RepositorySettings struct { + v *types.S3RepositorySettings +} + +func NewS3RepositorySettings(bucket string) *_s3RepositorySettings { + + tmp := &_s3RepositorySettings{v: types.NewS3RepositorySettings()} + + tmp.Bucket(bucket) + + return tmp + +} + +// The path to the repository data within its bucket. +// It defaults to an empty string, meaning that the repository is at the root of +// the bucket. +// The value of this setting should not start or end with a forward slash (`/`). +// +// NOTE: Don't set base_path when configuring a snapshot repository for Elastic +// Cloud Enterprise. +// Elastic Cloud Enterprise automatically generates the `base_path` for each +// deployment so that multiple deployments may share the same bucket. +func (s *_s3RepositorySettings) BasePath(basepath string) *_s3RepositorySettings { + + s.v.BasePath = &basepath + + return s +} + +// The name of the S3 bucket to use for snapshots. +// The bucket name must adhere to Amazon's S3 bucket naming rules. +func (s *_s3RepositorySettings) Bucket(bucket string) *_s3RepositorySettings { + + s.v.Bucket = bucket + + return s +} + +// The minimum threshold below which the chunk is uploaded using a single +// request. 
+// Beyond this threshold, the S3 repository will use the AWS Multipart Upload +// API to split the chunk into several parts, each of `buffer_size` length, and +// to upload each part in its own request. +// Note that setting a buffer size lower than 5mb is not allowed since it will +// prevent the use of the Multipart API and may result in upload errors. +// It is also not possible to set a buffer size greater than 5gb as it is the +// maximum upload size allowed by S3. +// Defaults to `100mb` or 5% of JVM heap, whichever is smaller. +func (s *_s3RepositorySettings) BufferSize(bytesize types.ByteSizeVariant) *_s3RepositorySettings { + + s.v.BufferSize = *bytesize.ByteSizeCaster() + + return s +} + +// The S3 repository supports all S3 canned ACLs: `private`, `public-read`, +// `public-read-write`, `authenticated-read`, `log-delivery-write`, +// `bucket-owner-read`, `bucket-owner-full-control`. +// You could specify a canned ACL using the `canned_acl` setting. +// When the S3 repository creates buckets and objects, it adds the canned ACL +// into the buckets and objects. +func (s *_s3RepositorySettings) CannedAcl(cannedacl string) *_s3RepositorySettings { + + s.v.CannedAcl = &cannedacl + + return s +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. +// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. 
+func (s *_s3RepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_s3RepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// The name of the S3 client to use to connect to S3. +func (s *_s3RepositorySettings) Client(client string) *_s3RepositorySettings { + + s.v.Client = &client + + return s +} + +// When set to `true`, metadata files are stored in compressed format. +// This setting doesn't affect index files that are already compressed by +// default. +func (s *_s3RepositorySettings) Compress(compress bool) *_s3RepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The maximum batch size, between 1 and 1000, used for `DeleteObjects` +// requests. +// Defaults to 1000 which is the maximum number supported by the AWS +// DeleteObjects API. +func (s *_s3RepositorySettings) DeleteObjectsMaxSize(deleteobjectsmaxsize int) *_s3RepositorySettings { + + s.v.DeleteObjectsMaxSize = &deleteobjectsmaxsize + + return s +} + +// The time to wait before trying again if an attempt to read a linearizable +// register fails. +func (s *_s3RepositorySettings) GetRegisterRetryDelay(duration types.DurationVariant) *_s3RepositorySettings { + + s.v.GetRegisterRetryDelay = *duration.DurationCaster() + + return s +} + +// The maximum number of parts that Elasticsearch will write during a multipart +// upload of a single object. +// Files which are larger than `buffer_size × max_multipart_parts` will be +// chunked into several smaller objects. +// Elasticsearch may also split a file across multiple objects to satisfy other +// constraints such as the `chunk_size` limit. +// Defaults to `10000` which is the maximum number of parts in a multipart +// upload in AWS S3. 
+func (s *_s3RepositorySettings) MaxMultipartParts(maxmultipartparts int) *_s3RepositorySettings { + + s.v.MaxMultipartParts = &maxmultipartparts + + return s +} + +// The maximum number of possibly-dangling multipart uploads to clean up in each +// batch of snapshot deletions. +// Defaults to 1000 which is the maximum number supported by the AWS +// ListMultipartUploads API. +// If set to `0`, Elasticsearch will not attempt to clean up dangling multipart +// uploads. +func (s *_s3RepositorySettings) MaxMultipartUploadCleanupSize(maxmultipartuploadcleanupsize int) *_s3RepositorySettings { + + s.v.MaxMultipartUploadCleanupSize = &maxmultipartuploadcleanupsize + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_s3RepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_s3RepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_s3RepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_s3RepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// If true, the repository is read-only. +// The cluster can retrieve and restore snapshots from the repository but not +// write to the repository or create snapshots in it. +// +// Only a cluster with write access can create snapshots in the repository. +// All other clusters connected to the repository should have the `readonly` +// parameter set to `true`. +// +// If `false`, the cluster can write to the repository and create snapshots in +// it. 
+// +// IMPORTANT: If you register the same snapshot repository with multiple +// clusters, only one cluster should have write access to the repository. +// Having multiple clusters write to the repository at the same time risks +// corrupting the contents of the repository. +func (s *_s3RepositorySettings) Readonly(readonly bool) *_s3RepositorySettings { + + s.v.Readonly = &readonly + + return s +} + +// When set to `true`, files are encrypted on server side using an AES256 +// algorithm. +func (s *_s3RepositorySettings) ServerSideEncryption(serversideencryption bool) *_s3RepositorySettings { + + s.v.ServerSideEncryption = &serversideencryption + + return s +} + +// The S3 storage class for objects written to the repository. +// Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, +// and `intelligent_tiering`. +func (s *_s3RepositorySettings) StorageClass(storageclass string) *_s3RepositorySettings { + + s.v.StorageClass = &storageclass + + return s +} + +// The delay before the first retry and the amount the delay is incremented by +// on each subsequent retry. +// The default is 50ms and the minimum is 0ms. +func (s *_s3RepositorySettings) ThrottledDeleteRetryDelayIncrement(duration types.DurationVariant) *_s3RepositorySettings { + + s.v.ThrottledDeleteRetryDelayIncrement = *duration.DurationCaster() + + return s +} + +// The upper bound on how long the delays between retries will grow to. +// The default is 5s and the minimum is 0ms. +func (s *_s3RepositorySettings) ThrottledDeleteRetryMaximumDelay(duration types.DurationVariant) *_s3RepositorySettings { + + s.v.ThrottledDeleteRetryMaximumDelay = *duration.DurationCaster() + + return s +} + +// The number of times to retry a throttled snapshot deletion. +// The default is 10 and the minimum value is 0 which will disable retries +// altogether. +// Note that if retries are enabled in the Azure client, each of these retries +// comprises that many client-level retries. 
+func (s *_s3RepositorySettings) ThrottledDeleteRetryMaximumNumberOfRetries(throttleddeleteretrymaximumnumberofretries int) *_s3RepositorySettings { + + s.v.ThrottledDeleteRetryMaximumNumberOfRetries = &throttleddeleteretrymaximumnumberofretries + + return s +} + +func (s *_s3RepositorySettings) S3RepositorySettingsCaster() *types.S3RepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/samplediversity.go b/typedapi/esdsl/samplediversity.go new file mode 100644 index 0000000000..c2eb3e5822 --- /dev/null +++ b/typedapi/esdsl/samplediversity.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sampleDiversity struct { + v *types.SampleDiversity +} + +func NewSampleDiversity(maxdocspervalue int) *_sampleDiversity { + + tmp := &_sampleDiversity{v: types.NewSampleDiversity()} + + tmp.MaxDocsPerValue(maxdocspervalue) + + return tmp + +} + +func (s *_sampleDiversity) Field(field string) *_sampleDiversity { + + s.v.Field = field + + return s +} + +func (s *_sampleDiversity) MaxDocsPerValue(maxdocspervalue int) *_sampleDiversity { + + s.v.MaxDocsPerValue = maxdocspervalue + + return s +} + +func (s *_sampleDiversity) SampleDiversityCaster() *types.SampleDiversity { + return s.v +} diff --git a/typedapi/esdsl/sampleraggregation.go b/typedapi/esdsl/sampleraggregation.go new file mode 100644 index 0000000000..4eddfe1e32 --- /dev/null +++ b/typedapi/esdsl/sampleraggregation.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _samplerAggregation struct { + v *types.SamplerAggregation +} + +// A filtering aggregation used to limit any sub aggregations' processing to a +// sample of the top-scoring documents. +func NewSamplerAggregation() *_samplerAggregation { + + return &_samplerAggregation{v: types.NewSamplerAggregation()} + +} + +// Limits how many top-scoring documents are collected in the sample processed +// on each shard. +func (s *_samplerAggregation) ShardSize(shardsize int) *_samplerAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +func (s *_samplerAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Sampler = s.v + + return container +} + +func (s *_samplerAggregation) SamplerAggregationCaster() *types.SamplerAggregation { + return s.v +} diff --git a/typedapi/esdsl/scalarvalue.go b/typedapi/esdsl/scalarvalue.go new file mode 100644 index 0000000000..6567537eb8 --- /dev/null +++ b/typedapi/esdsl/scalarvalue.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _scalarValue struct { + v types.ScalarValue +} + +func NewScalarValue() *_scalarValue { + return &_scalarValue{v: nil} +} + +func (u *_scalarValue) Int64(int64 int64) *_scalarValue { + + u.v = &int64 + + return u +} + +func (u *_scalarValue) Float64(float64 types.Float64) *_scalarValue { + + u.v = &float64 + + return u +} + +func (u *_scalarValue) String(string string) *_scalarValue { + + u.v = &string + + return u +} + +func (u *_scalarValue) Bool(bool bool) *_scalarValue { + + u.v = &bool + + return u +} + +func (u *_scalarValue) Nil() *_scalarValue { + u.v = types.NullValue{} + return u +} + +func (u *_scalarValue) ScalarValueCaster() *types.ScalarValue { + return &u.v +} diff --git a/typedapi/esdsl/scaledfloatnumberproperty.go b/typedapi/esdsl/scaledfloatnumberproperty.go new file mode 100644 index 0000000000..e1af0a5985 --- /dev/null +++ b/typedapi/esdsl/scaledfloatnumberproperty.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _scaledFloatNumberProperty struct { + v *types.ScaledFloatNumberProperty +} + +func NewScaledFloatNumberProperty() *_scaledFloatNumberProperty { + + return &_scaledFloatNumberProperty{v: types.NewScaledFloatNumberProperty()} + +} + +func (s *_scaledFloatNumberProperty) Boost(boost types.Float64) *_scaledFloatNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_scaledFloatNumberProperty) Coerce(coerce bool) *_scaledFloatNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_scaledFloatNumberProperty) CopyTo(fields ...string) *_scaledFloatNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_scaledFloatNumberProperty) DocValues(docvalues bool) *_scaledFloatNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_scaledFloatNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_scaledFloatNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_scaledFloatNumberProperty) Fields(fields map[string]types.Property) *_scaledFloatNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_scaledFloatNumberProperty) AddField(key string, value types.PropertyVariant) *_scaledFloatNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = 
make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_scaledFloatNumberProperty) IgnoreAbove(ignoreabove int) *_scaledFloatNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_scaledFloatNumberProperty) IgnoreMalformed(ignoremalformed bool) *_scaledFloatNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_scaledFloatNumberProperty) Index(index bool) *_scaledFloatNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_scaledFloatNumberProperty) Meta(meta map[string]string) *_scaledFloatNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_scaledFloatNumberProperty) AddMeta(key string, value string) *_scaledFloatNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_scaledFloatNumberProperty) NullValue(nullvalue types.Float64) *_scaledFloatNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_scaledFloatNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_scaledFloatNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_scaledFloatNumberProperty) Properties(properties map[string]types.Property) *_scaledFloatNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_scaledFloatNumberProperty) AddProperty(key string, value types.PropertyVariant) *_scaledFloatNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_scaledFloatNumberProperty) ScalingFactor(scalingfactor types.Float64) *_scaledFloatNumberProperty { + + 
s.v.ScalingFactor = &scalingfactor + + return s +} + +func (s *_scaledFloatNumberProperty) Script(script types.ScriptVariant) *_scaledFloatNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_scaledFloatNumberProperty) Store(store bool) *_scaledFloatNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_scaledFloatNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_scaledFloatNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_scaledFloatNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_scaledFloatNumberProperty { + + s.v.TimeSeriesDimension = &timeseriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series metric. +// Defaults to false. +func (s *_scaledFloatNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_scaledFloatNumberProperty { + + s.v.TimeSeriesMetric = &timeseriesmetric + return s +} + +func (s *_scaledFloatNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_scaledFloatNumberProperty) ScaledFloatNumberPropertyCaster() *types.ScaledFloatNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/schedulecontainer.go b/typedapi/esdsl/schedulecontainer.go new file mode 100644 index 0000000000..a3b3cb39be --- /dev/null +++ b/typedapi/esdsl/schedulecontainer.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _scheduleContainer struct { + v *types.ScheduleContainer +} + +func NewScheduleContainer() *_scheduleContainer { + return &_scheduleContainer{v: types.NewScheduleContainer()} +} + +// AdditionalScheduleContainerProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_scheduleContainer) AdditionalScheduleContainerProperty(key string, value json.RawMessage) *_scheduleContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalScheduleContainerProperty = tmp + return s +} + +func (s *_scheduleContainer) Cron(cronexpression string) *_scheduleContainer { + + s.v.Cron = &cronexpression + + return s +} + +func (s *_scheduleContainer) Daily(daily types.DailyScheduleVariant) *_scheduleContainer { + + s.v.Daily = daily.DailyScheduleCaster() + + return s +} + +func (s *_scheduleContainer) Hourly(hourly types.HourlyScheduleVariant) *_scheduleContainer { + + s.v.Hourly = hourly.HourlyScheduleCaster() + + return s +} + +func (s *_scheduleContainer) Interval(duration types.DurationVariant) *_scheduleContainer { + + s.v.Interval = *duration.DurationCaster() + + return s +} + +func (s *_scheduleContainer) Monthly(monthlies ...types.TimeOfMonthVariant) *_scheduleContainer { + + s.v.Monthly = make([]types.TimeOfMonth, len(monthlies)) + for i, v := range monthlies { + s.v.Monthly[i] = *v.TimeOfMonthCaster() + } + + return s +} + +func (s *_scheduleContainer) Timezone(timezone string) *_scheduleContainer { + + s.v.Timezone = &timezone + + return s +} + +func (s *_scheduleContainer) Weekly(weeklies ...types.TimeOfWeekVariant) *_scheduleContainer { + + s.v.Weekly = make([]types.TimeOfWeek, len(weeklies)) + for i, v := range weeklies { + s.v.Weekly[i] = *v.TimeOfWeekCaster() + } + + return s +} + +func (s *_scheduleContainer) Yearly(yearlies ...types.TimeOfYearVariant) *_scheduleContainer { + + s.v.Yearly = make([]types.TimeOfYear, len(yearlies)) + for i, v := range yearlies { + s.v.Yearly[i] = *v.TimeOfYearCaster() + } + + return s +} + +func (s *_scheduleContainer) ScheduleContainerCaster() *types.ScheduleContainer { + return s.v +} diff --git a/typedapi/esdsl/scheduletimeofday.go b/typedapi/esdsl/scheduletimeofday.go new file mode 100644 index 0000000000..14de066c22 --- /dev/null +++ 
b/typedapi/esdsl/scheduletimeofday.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _scheduleTimeOfDay struct { + v types.ScheduleTimeOfDay +} + +func NewScheduleTimeOfDay() *_scheduleTimeOfDay { + return &_scheduleTimeOfDay{v: nil} +} + +func (u *_scheduleTimeOfDay) String(string string) *_scheduleTimeOfDay { + + u.v = &string + + return u +} + +func (u *_scheduleTimeOfDay) HourAndMinute(hourandminute types.HourAndMinuteVariant) *_scheduleTimeOfDay { + + u.v = &hourandminute + + return u +} + +// Interface implementation for HourAndMinute in ScheduleTimeOfDay union +func (u *_hourAndMinute) ScheduleTimeOfDayCaster() *types.ScheduleTimeOfDay { + t := types.ScheduleTimeOfDay(u.v) + return &t +} + +func (u *_scheduleTimeOfDay) ScheduleTimeOfDayCaster() *types.ScheduleTimeOfDay { + return &u.v +} diff --git a/typedapi/esdsl/scheduletriggerevent.go b/typedapi/esdsl/scheduletriggerevent.go new file mode 100644 index 0000000000..1c81954cb8 --- /dev/null +++ b/typedapi/esdsl/scheduletriggerevent.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scheduleTriggerEvent struct { + v *types.ScheduleTriggerEvent +} + +func NewScheduleTriggerEvent() *_scheduleTriggerEvent { + + return &_scheduleTriggerEvent{v: types.NewScheduleTriggerEvent()} + +} + +func (s *_scheduleTriggerEvent) ScheduledTime(datetime types.DateTimeVariant) *_scheduleTriggerEvent { + + s.v.ScheduledTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_scheduleTriggerEvent) TriggeredTime(datetime types.DateTimeVariant) *_scheduleTriggerEvent { + + s.v.TriggeredTime = *datetime.DateTimeCaster() + + return s +} + +func (s *_scheduleTriggerEvent) TriggerEventContainerCaster() *types.TriggerEventContainer { + container := types.NewTriggerEventContainer() + + container.Schedule = s.v + + return container +} + +func (s *_scheduleTriggerEvent) ScheduleTriggerEventCaster() *types.ScheduleTriggerEvent { + return s.v +} diff --git a/typedapi/esdsl/schedulingconfiguration.go b/typedapi/esdsl/schedulingconfiguration.go new file mode 100644 index 0000000000..4e1badb581 --- /dev/null +++ b/typedapi/esdsl/schedulingconfiguration.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _schedulingConfiguration struct { + v *types.SchedulingConfiguration +} + +func NewSchedulingConfiguration() *_schedulingConfiguration { + + return &_schedulingConfiguration{v: types.NewSchedulingConfiguration()} + +} + +func (s *_schedulingConfiguration) AccessControl(accesscontrol types.ConnectorSchedulingVariant) *_schedulingConfiguration { + + s.v.AccessControl = accesscontrol.ConnectorSchedulingCaster() + + return s +} + +func (s *_schedulingConfiguration) Full(full types.ConnectorSchedulingVariant) *_schedulingConfiguration { + + s.v.Full = full.ConnectorSchedulingCaster() + + return s +} + +func (s *_schedulingConfiguration) Incremental(incremental types.ConnectorSchedulingVariant) *_schedulingConfiguration { + + s.v.Incremental = incremental.ConnectorSchedulingCaster() + + return s +} + +func (s *_schedulingConfiguration) SchedulingConfigurationCaster() *types.SchedulingConfiguration { + return s.v +} diff --git a/typedapi/esdsl/scoresort.go b/typedapi/esdsl/scoresort.go new file mode 100644 index 0000000000..c59e596b4f --- /dev/null +++ b/typedapi/esdsl/scoresort.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _scoreSort struct { + v *types.ScoreSort +} + +func NewScoreSort() *_scoreSort { + + return &_scoreSort{v: types.NewScoreSort()} + +} + +func (s *_scoreSort) Order(order sortorder.SortOrder) *_scoreSort { + + s.v.Order = &order + return s +} + +func (s *_scoreSort) SortOptionsCaster() *types.SortOptions { + container := types.NewSortOptions() + + container.Score_ = s.v + + return container +} + +func (s *_scoreSort) ScoreSortCaster() *types.ScoreSort { + return s.v +} diff --git a/typedapi/esdsl/script.go b/typedapi/esdsl/script.go new file mode 100644 index 0000000000..39dcf7d68b --- /dev/null +++ b/typedapi/esdsl/script.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" +) + +type _script struct { + v *types.Script +} + +// Script used to return matching documents. +// This script must return a boolean value: `true` or `false`. +func NewScript(source string) *_script { + + tmp := &_script{v: types.NewScript()} + + tmp.Source(source) + + return tmp + +} + +// The `id` for a stored script. +func (s *_script) Id(id string) *_script { + + s.v.Id = &id + + return s +} + +// Specifies the language the script is written in. +func (s *_script) Lang(lang scriptlanguage.ScriptLanguage) *_script { + + s.v.Lang = &lang + return s +} + +func (s *_script) Options(options map[string]string) *_script { + + s.v.Options = options + return s +} + +func (s *_script) AddOption(key string, value string) *_script { + + var tmp map[string]string + if s.v.Options == nil { + s.v.Options = make(map[string]string) + } else { + tmp = s.v.Options + } + + tmp[key] = value + + s.v.Options = tmp + return s +} + +// Specifies any named parameters that are passed into the script as variables. +// Use parameters instead of hard-coded values to decrease compile time. 
+func (s *_script) Params(params map[string]json.RawMessage) *_script { + + s.v.Params = params + return s +} + +func (s *_script) AddParam(key string, value json.RawMessage) *_script { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// The script source. +func (s *_script) Source(source string) *_script { + + s.v.Source = &source + + return s +} + +func (s *_script) IntervalsFilterCaster() *types.IntervalsFilter { + container := types.NewIntervalsFilter() + + container.Script = s.v + + return container +} + +func (s *_script) ScriptCaster() *types.Script { + return s.v +} diff --git a/typedapi/esdsl/scriptcondition.go b/typedapi/esdsl/scriptcondition.go new file mode 100644 index 0000000000..c7c85cf8d9 --- /dev/null +++ b/typedapi/esdsl/scriptcondition.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _scriptCondition struct { + v *types.ScriptCondition +} + +func NewScriptCondition() *_scriptCondition { + + return &_scriptCondition{v: types.NewScriptCondition()} + +} + +func (s *_scriptCondition) Id(id string) *_scriptCondition { + + s.v.Id = &id + + return s +} + +func (s *_scriptCondition) Lang(lang string) *_scriptCondition { + + s.v.Lang = &lang + + return s +} + +func (s *_scriptCondition) Params(params map[string]json.RawMessage) *_scriptCondition { + + s.v.Params = params + return s +} + +func (s *_scriptCondition) AddParam(key string, value json.RawMessage) *_scriptCondition { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_scriptCondition) Source(source string) *_scriptCondition { + + s.v.Source = &source + + return s +} + +func (s *_scriptCondition) WatcherConditionCaster() *types.WatcherCondition { + container := types.NewWatcherCondition() + + container.Script = s.v + + return container +} + +func (s *_scriptCondition) ScriptConditionCaster() *types.ScriptCondition { + return s.v +} diff --git a/typedapi/esdsl/scriptedheuristic.go b/typedapi/esdsl/scriptedheuristic.go new file mode 100644 index 0000000000..6a4654be8f --- /dev/null +++ b/typedapi/esdsl/scriptedheuristic.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scriptedHeuristic struct { + v *types.ScriptedHeuristic +} + +func NewScriptedHeuristic(script types.ScriptVariant) *_scriptedHeuristic { + + tmp := &_scriptedHeuristic{v: types.NewScriptedHeuristic()} + + tmp.Script(script) + + return tmp + +} + +func (s *_scriptedHeuristic) Script(script types.ScriptVariant) *_scriptedHeuristic { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptedHeuristic) ScriptedHeuristicCaster() *types.ScriptedHeuristic { + return s.v +} diff --git a/typedapi/esdsl/scriptedmetricaggregation.go b/typedapi/esdsl/scriptedmetricaggregation.go new file mode 100644 index 0000000000..280795d192 --- /dev/null +++ b/typedapi/esdsl/scriptedmetricaggregation.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _scriptedMetricAggregation struct { + v *types.ScriptedMetricAggregation +} + +// A metric aggregation that uses scripts to provide a metric output. +func NewScriptedMetricAggregation() *_scriptedMetricAggregation { + + return &_scriptedMetricAggregation{v: types.NewScriptedMetricAggregation()} + +} + +// Runs once on each shard after document collection is complete. +// Allows the aggregation to consolidate the state returned from each shard. +func (s *_scriptedMetricAggregation) CombineScript(combinescript types.ScriptVariant) *_scriptedMetricAggregation { + + s.v.CombineScript = combinescript.ScriptCaster() + + return s +} + +// The field on which to run the aggregation. +func (s *_scriptedMetricAggregation) Field(field string) *_scriptedMetricAggregation { + + s.v.Field = &field + + return s +} + +// Runs prior to any collection of documents. +// Allows the aggregation to set up any initial state. +func (s *_scriptedMetricAggregation) InitScript(initscript types.ScriptVariant) *_scriptedMetricAggregation { + + s.v.InitScript = initscript.ScriptCaster() + + return s +} + +// Run once per document collected. +// If no `combine_script` is specified, the resulting state needs to be stored +// in the `state` object. 
+func (s *_scriptedMetricAggregation) MapScript(mapscript types.ScriptVariant) *_scriptedMetricAggregation { + + s.v.MapScript = mapscript.ScriptCaster() + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_scriptedMetricAggregation) Missing(missing types.MissingVariant) *_scriptedMetricAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +// A global object with script parameters for `init`, `map` and `combine` +// scripts. +// It is shared between the scripts. +func (s *_scriptedMetricAggregation) Params(params map[string]json.RawMessage) *_scriptedMetricAggregation { + + s.v.Params = params + return s +} + +func (s *_scriptedMetricAggregation) AddParam(key string, value json.RawMessage) *_scriptedMetricAggregation { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// Runs once on the coordinating node after all shards have returned their +// results. +// The script is provided with access to a variable `states`, which is an array +// of the result of the `combine_script` on each shard. 
+func (s *_scriptedMetricAggregation) ReduceScript(reducescript types.ScriptVariant) *_scriptedMetricAggregation { + + s.v.ReduceScript = reducescript.ScriptCaster() + + return s +} + +func (s *_scriptedMetricAggregation) Script(script types.ScriptVariant) *_scriptedMetricAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_scriptedMetricAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.ScriptedMetric = s.v + + return container +} + +func (s *_scriptedMetricAggregation) ScriptedMetricAggregationCaster() *types.ScriptedMetricAggregation { + return s.v +} diff --git a/typedapi/esdsl/scriptfield.go b/typedapi/esdsl/scriptfield.go new file mode 100644 index 0000000000..d591d1e51c --- /dev/null +++ b/typedapi/esdsl/scriptfield.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scriptField struct { + v *types.ScriptField +} + +func NewScriptField(script types.ScriptVariant) *_scriptField { + + tmp := &_scriptField{v: types.NewScriptField()} + + tmp.Script(script) + + return tmp + +} + +func (s *_scriptField) IgnoreFailure(ignorefailure bool) *_scriptField { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +func (s *_scriptField) Script(script types.ScriptVariant) *_scriptField { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptField) ScriptFieldCaster() *types.ScriptField { + return s.v +} diff --git a/typedapi/esdsl/scriptprocessor.go b/typedapi/esdsl/scriptprocessor.go new file mode 100644 index 0000000000..5e99ef6187 --- /dev/null +++ b/typedapi/esdsl/scriptprocessor.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _scriptProcessor struct { + v *types.ScriptProcessor +} + +// Runs an inline or stored script on incoming documents. +// The script runs in the `ingest` context. +func NewScriptProcessor() *_scriptProcessor { + + return &_scriptProcessor{v: types.NewScriptProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_scriptProcessor) Description(description string) *_scriptProcessor { + + s.v.Description = &description + + return s +} + +// ID of a stored script. +// If no `source` is specified, this parameter is required. +func (s *_scriptProcessor) Id(id string) *_scriptProcessor { + + s.v.Id = &id + + return s +} + +// Conditionally execute the processor. +func (s *_scriptProcessor) If(if_ types.ScriptVariant) *_scriptProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_scriptProcessor) IgnoreFailure(ignorefailure bool) *_scriptProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Script language. +func (s *_scriptProcessor) Lang(lang string) *_scriptProcessor { + + s.v.Lang = &lang + + return s +} + +// Handle failures for the processor. +func (s *_scriptProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_scriptProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Object containing parameters for the script. 
+func (s *_scriptProcessor) Params(params map[string]json.RawMessage) *_scriptProcessor { + + s.v.Params = params + return s +} + +func (s *_scriptProcessor) AddParam(key string, value json.RawMessage) *_scriptProcessor { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// Inline script. +// If no `id` is specified, this parameter is required. +func (s *_scriptProcessor) Source(source string) *_scriptProcessor { + + s.v.Source = &source + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_scriptProcessor) Tag(tag string) *_scriptProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_scriptProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Script = s.v + + return container +} + +func (s *_scriptProcessor) ScriptProcessorCaster() *types.ScriptProcessor { + return s.v +} diff --git a/typedapi/esdsl/scriptquery.go b/typedapi/esdsl/scriptquery.go new file mode 100644 index 0000000000..9a1c4a5939 --- /dev/null +++ b/typedapi/esdsl/scriptquery.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scriptQuery struct { + v *types.ScriptQuery +} + +// Filters documents based on a provided script. +// The script query is typically used in a filter context. +func NewScriptQuery(script types.ScriptVariant) *_scriptQuery { + + tmp := &_scriptQuery{v: types.NewScriptQuery()} + + tmp.Script(script) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_scriptQuery) Boost(boost float32) *_scriptQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_scriptQuery) QueryName_(queryname_ string) *_scriptQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Contains a script to run as a query. +// This script must return a boolean value, `true` or `false`. +func (s *_scriptQuery) Script(script types.ScriptVariant) *_scriptQuery { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Script = s.v + + return container +} + +func (s *_scriptQuery) ScriptQueryCaster() *types.ScriptQuery { + return s.v +} diff --git a/typedapi/esdsl/scriptscorefunction.go b/typedapi/esdsl/scriptscorefunction.go new file mode 100644 index 0000000000..64c56a7e0f --- /dev/null +++ b/typedapi/esdsl/scriptscorefunction.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scriptScoreFunction struct { + v *types.ScriptScoreFunction +} + +// Enables you to wrap another query and customize the scoring of it optionally +// with a computation derived from other numeric field values in the doc using a +// script expression. +func NewScriptScoreFunction(script types.ScriptVariant) *_scriptScoreFunction { + + tmp := &_scriptScoreFunction{v: types.NewScriptScoreFunction()} + + tmp.Script(script) + + return tmp + +} + +// A script that computes a score. 
+func (s *_scriptScoreFunction) Script(script types.ScriptVariant) *_scriptScoreFunction { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptScoreFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.ScriptScore = s.v + + return container +} + +func (s *_scriptScoreFunction) ScriptScoreFunctionCaster() *types.ScriptScoreFunction { + return s.v +} diff --git a/typedapi/esdsl/scriptscorequery.go b/typedapi/esdsl/scriptscorequery.go new file mode 100644 index 0000000000..2ecc5f3ac2 --- /dev/null +++ b/typedapi/esdsl/scriptscorequery.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _scriptScoreQuery struct { + v *types.ScriptScoreQuery +} + +// Uses a script to provide a custom score for returned documents. 
+func NewScriptScoreQuery(query types.QueryVariant, script types.ScriptVariant) *_scriptScoreQuery { + + tmp := &_scriptScoreQuery{v: types.NewScriptScoreQuery()} + + tmp.Query(query) + + tmp.Script(script) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_scriptScoreQuery) Boost(boost float32) *_scriptScoreQuery { + + s.v.Boost = &boost + + return s +} + +// Documents with a score lower than this floating point number are excluded +// from the search results. +func (s *_scriptScoreQuery) MinScore(minscore float32) *_scriptScoreQuery { + + s.v.MinScore = &minscore + + return s +} + +// Query used to return documents. +func (s *_scriptScoreQuery) Query(query types.QueryVariant) *_scriptScoreQuery { + + s.v.Query = *query.QueryCaster() + + return s +} + +func (s *_scriptScoreQuery) QueryName_(queryname_ string) *_scriptScoreQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Script used to compute the score of documents returned by the query. +// Important: final relevance scores from the `script_score` query cannot be +// negative. +func (s *_scriptScoreQuery) Script(script types.ScriptVariant) *_scriptScoreQuery { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptScoreQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.ScriptScore = s.v + + return container +} + +func (s *_scriptScoreQuery) ScriptScoreQueryCaster() *types.ScriptScoreQuery { + return s.v +} diff --git a/typedapi/esdsl/scriptsort.go b/typedapi/esdsl/scriptsort.go new file mode 100644 index 0000000000..591e8f453c --- /dev/null +++ b/typedapi/esdsl/scriptsort.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _scriptSort struct { + v *types.ScriptSort +} + +func NewScriptSort(script types.ScriptVariant) *_scriptSort { + + tmp := &_scriptSort{v: types.NewScriptSort()} + + tmp.Script(script) + + return tmp + +} + +func (s *_scriptSort) Mode(mode sortmode.SortMode) *_scriptSort { + + s.v.Mode = &mode + return s +} + +func (s *_scriptSort) Nested(nested types.NestedSortValueVariant) *_scriptSort { + + s.v.Nested = nested.NestedSortValueCaster() + + return s +} + +func (s *_scriptSort) Order(order sortorder.SortOrder) *_scriptSort { + + s.v.Order = &order + return s +} + +func (s *_scriptSort) Script(script types.ScriptVariant) *_scriptSort { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_scriptSort) Type(type_ scriptsorttype.ScriptSortType) *_scriptSort { + + 
s.v.Type = &type_ + return s +} + +func (s *_scriptSort) SortOptionsCaster() *types.SortOptions { + container := types.NewSortOptions() + + container.Script_ = s.v + + return container +} + +func (s *_scriptSort) ScriptSortCaster() *types.ScriptSort { + return s.v +} diff --git a/typedapi/esdsl/scripttransform.go b/typedapi/esdsl/scripttransform.go new file mode 100644 index 0000000000..a587af46e0 --- /dev/null +++ b/typedapi/esdsl/scripttransform.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _scriptTransform struct { + v *types.ScriptTransform +} + +func NewScriptTransform() *_scriptTransform { + + return &_scriptTransform{v: types.NewScriptTransform()} + +} + +func (s *_scriptTransform) Id(id string) *_scriptTransform { + + s.v.Id = &id + + return s +} + +func (s *_scriptTransform) Lang(lang string) *_scriptTransform { + + s.v.Lang = &lang + + return s +} + +func (s *_scriptTransform) Params(params map[string]json.RawMessage) *_scriptTransform { + + s.v.Params = params + return s +} + +func (s *_scriptTransform) AddParam(key string, value json.RawMessage) *_scriptTransform { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_scriptTransform) Source(source string) *_scriptTransform { + + s.v.Source = &source + + return s +} + +func (s *_scriptTransform) TransformContainerCaster() *types.TransformContainer { + container := types.NewTransformContainer() + + container.Script = s.v + + return container +} + +func (s *_scriptTransform) ScriptTransformCaster() *types.ScriptTransform { + return s.v +} diff --git a/typedapi/esdsl/scrollids.go b/typedapi/esdsl/scrollids.go new file mode 100644 index 0000000000..48e729a3a9 --- /dev/null +++ b/typedapi/esdsl/scrollids.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _scrollIds struct { + v types.ScrollIds +} + +func NewScrollIds() *_scrollIds { + return &_scrollIds{v: []string{}} +} + +func (u *_scrollIds) ScrollIdsCaster() *types.ScrollIds { + return &u.v +} diff --git a/typedapi/esdsl/searchablesnapshotaction.go b/typedapi/esdsl/searchablesnapshotaction.go new file mode 100644 index 0000000000..85c0541b75 --- /dev/null +++ b/typedapi/esdsl/searchablesnapshotaction.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchableSnapshotAction struct { + v *types.SearchableSnapshotAction +} + +func NewSearchableSnapshotAction(snapshotrepository string) *_searchableSnapshotAction { + + tmp := &_searchableSnapshotAction{v: types.NewSearchableSnapshotAction()} + + tmp.SnapshotRepository(snapshotrepository) + + return tmp + +} + +func (s *_searchableSnapshotAction) ForceMergeIndex(forcemergeindex bool) *_searchableSnapshotAction { + + s.v.ForceMergeIndex = &forcemergeindex + + return s +} + +func (s *_searchableSnapshotAction) SnapshotRepository(snapshotrepository string) *_searchableSnapshotAction { + + s.v.SnapshotRepository = snapshotrepository + + return s +} + +func (s *_searchableSnapshotAction) SearchableSnapshotActionCaster() *types.SearchableSnapshotAction { + return s.v +} diff --git a/typedapi/esdsl/searchaccess.go b/typedapi/esdsl/searchaccess.go new file mode 100644 index 0000000000..b54fe9176f --- /dev/null +++ b/typedapi/esdsl/searchaccess.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchAccess struct { + v *types.SearchAccess +} + +func NewSearchAccess() *_searchAccess { + + return &_searchAccess{v: types.NewSearchAccess()} + +} + +// Set to `true` if using wildcard or regular expressions for patterns that +// cover restricted indices. Implicitly, restricted indices have limited +// privileges that can cause pattern tests to fail. If restricted indices are +// explicitly included in the `names` list, Elasticsearch checks privileges +// against these indices regardless of the value set for +// `allow_restricted_indices`. +func (s *_searchAccess) AllowRestrictedIndices(allowrestrictedindices bool) *_searchAccess { + + s.v.AllowRestrictedIndices = &allowrestrictedindices + + return s +} + +// The document fields that the owners of the role have read access to. +func (s *_searchAccess) FieldSecurity(fieldsecurity types.FieldSecurityVariant) *_searchAccess { + + s.v.FieldSecurity = fieldsecurity.FieldSecurityCaster() + + return s +} + +// A list of indices (or index name patterns) to which the permissions in this +// entry apply. +func (s *_searchAccess) Names(names ...string) *_searchAccess { + + s.v.Names = make([]string, len(names)) + s.v.Names = names + + return s +} + +// A search query that defines the documents the owners of the role have access +// to. A document within the specified indices must match this query for it to +// be accessible by the owners of the role. 
+func (s *_searchAccess) Query(indicesprivilegesquery types.IndicesPrivilegesQueryVariant) *_searchAccess { + + s.v.Query = *indicesprivilegesquery.IndicesPrivilegesQueryCaster() + + return s +} + +func (s *_searchAccess) SearchAccessCaster() *types.SearchAccess { + return s.v +} diff --git a/typedapi/esdsl/searchapplicationparameters.go b/typedapi/esdsl/searchapplicationparameters.go new file mode 100644 index 0000000000..e32571998f --- /dev/null +++ b/typedapi/esdsl/searchapplicationparameters.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchApplicationParameters struct { + v *types.SearchApplicationParameters +} + +func NewSearchApplicationParameters() *_searchApplicationParameters { + + return &_searchApplicationParameters{v: types.NewSearchApplicationParameters()} + +} + +// Analytics collection associated to the Search Application. 
+func (s *_searchApplicationParameters) AnalyticsCollectionName(name string) *_searchApplicationParameters { + + s.v.AnalyticsCollectionName = &name + + return s +} + +// Indices that are part of the Search Application. +func (s *_searchApplicationParameters) Indices(indices ...string) *_searchApplicationParameters { + + for _, v := range indices { + + s.v.Indices = append(s.v.Indices, v) + + } + return s +} + +// Search template to use on search operations. +func (s *_searchApplicationParameters) Template(template types.SearchApplicationTemplateVariant) *_searchApplicationParameters { + + s.v.Template = template.SearchApplicationTemplateCaster() + + return s +} + +func (s *_searchApplicationParameters) SearchApplicationParametersCaster() *types.SearchApplicationParameters { + return s.v +} diff --git a/typedapi/esdsl/searchapplicationtemplate.go b/typedapi/esdsl/searchapplicationtemplate.go new file mode 100644 index 0000000000..3ae77cb00c --- /dev/null +++ b/typedapi/esdsl/searchapplicationtemplate.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchApplicationTemplate struct { + v *types.SearchApplicationTemplate +} + +func NewSearchApplicationTemplate(script types.ScriptVariant) *_searchApplicationTemplate { + + tmp := &_searchApplicationTemplate{v: types.NewSearchApplicationTemplate()} + + tmp.Script(script) + + return tmp + +} + +// The associated mustache template. +func (s *_searchApplicationTemplate) Script(script types.ScriptVariant) *_searchApplicationTemplate { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_searchApplicationTemplate) SearchApplicationTemplateCaster() *types.SearchApplicationTemplate { + return s.v +} diff --git a/typedapi/esdsl/searchasyoutypeproperty.go b/typedapi/esdsl/searchasyoutypeproperty.go new file mode 100644 index 0000000000..8a06f397e7 --- /dev/null +++ b/typedapi/esdsl/searchasyoutypeproperty.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" +) + +type _searchAsYouTypeProperty struct { + v *types.SearchAsYouTypeProperty +} + +func NewSearchAsYouTypeProperty() *_searchAsYouTypeProperty { + + return &_searchAsYouTypeProperty{v: types.NewSearchAsYouTypeProperty()} + +} + +func (s *_searchAsYouTypeProperty) Analyzer(analyzer string) *_searchAsYouTypeProperty { + + s.v.Analyzer = &analyzer + + return s +} + +func (s *_searchAsYouTypeProperty) CopyTo(fields ...string) *_searchAsYouTypeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_searchAsYouTypeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_searchAsYouTypeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_searchAsYouTypeProperty) Fields(fields map[string]types.Property) *_searchAsYouTypeProperty { + + s.v.Fields = fields + return s +} + +func (s *_searchAsYouTypeProperty) AddField(key string, value types.PropertyVariant) *_searchAsYouTypeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_searchAsYouTypeProperty) IgnoreAbove(ignoreabove int) *_searchAsYouTypeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_searchAsYouTypeProperty) Index(index bool) *_searchAsYouTypeProperty { + + s.v.Index = &index + + return s +} + +func (s *_searchAsYouTypeProperty) IndexOptions(indexoptions 
indexoptions.IndexOptions) *_searchAsYouTypeProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +func (s *_searchAsYouTypeProperty) MaxShingleSize(maxshinglesize int) *_searchAsYouTypeProperty { + + s.v.MaxShingleSize = &maxshinglesize + + return s +} + +// Metadata about the field. +func (s *_searchAsYouTypeProperty) Meta(meta map[string]string) *_searchAsYouTypeProperty { + + s.v.Meta = meta + return s +} + +func (s *_searchAsYouTypeProperty) AddMeta(key string, value string) *_searchAsYouTypeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_searchAsYouTypeProperty) Norms(norms bool) *_searchAsYouTypeProperty { + + s.v.Norms = &norms + + return s +} + +func (s *_searchAsYouTypeProperty) Properties(properties map[string]types.Property) *_searchAsYouTypeProperty { + + s.v.Properties = properties + return s +} + +func (s *_searchAsYouTypeProperty) AddProperty(key string, value types.PropertyVariant) *_searchAsYouTypeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_searchAsYouTypeProperty) SearchAnalyzer(searchanalyzer string) *_searchAsYouTypeProperty { + + s.v.SearchAnalyzer = &searchanalyzer + + return s +} + +func (s *_searchAsYouTypeProperty) SearchQuoteAnalyzer(searchquoteanalyzer string) *_searchAsYouTypeProperty { + + s.v.SearchQuoteAnalyzer = &searchquoteanalyzer + + return s +} + +func (s *_searchAsYouTypeProperty) Similarity(similarity string) *_searchAsYouTypeProperty { + + s.v.Similarity = &similarity + + return s +} + +func (s *_searchAsYouTypeProperty) Store(store bool) *_searchAsYouTypeProperty { + + s.v.Store = &store + + return s +} + +func (s *_searchAsYouTypeProperty) 
SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_searchAsYouTypeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_searchAsYouTypeProperty) TermVector(termvector termvectoroption.TermVectorOption) *_searchAsYouTypeProperty { + + s.v.TermVector = &termvector + return s +} + +func (s *_searchAsYouTypeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_searchAsYouTypeProperty) SearchAsYouTypePropertyCaster() *types.SearchAsYouTypeProperty { + return s.v +} diff --git a/typedapi/esdsl/searchidle.go b/typedapi/esdsl/searchidle.go new file mode 100644 index 0000000000..897f2b87ae --- /dev/null +++ b/typedapi/esdsl/searchidle.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchIdle struct { + v *types.SearchIdle +} + +func NewSearchIdle() *_searchIdle { + + return &_searchIdle{v: types.NewSearchIdle()} + +} + +func (s *_searchIdle) After(duration types.DurationVariant) *_searchIdle { + + s.v.After = *duration.DurationCaster() + + return s +} + +func (s *_searchIdle) SearchIdleCaster() *types.SearchIdle { + return s.v +} diff --git a/typedapi/esdsl/searchinput.go b/typedapi/esdsl/searchinput.go new file mode 100644 index 0000000000..b328bf845c --- /dev/null +++ b/typedapi/esdsl/searchinput.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchInput struct { + v *types.SearchInput +} + +func NewSearchInput(request types.SearchInputRequestDefinitionVariant) *_searchInput { + + tmp := &_searchInput{v: types.NewSearchInput()} + + tmp.Request(request) + + return tmp + +} + +func (s *_searchInput) Extract(extracts ...string) *_searchInput { + + for _, v := range extracts { + + s.v.Extract = append(s.v.Extract, v) + + } + return s +} + +func (s *_searchInput) Request(request types.SearchInputRequestDefinitionVariant) *_searchInput { + + s.v.Request = *request.SearchInputRequestDefinitionCaster() + + return s +} + +func (s *_searchInput) Timeout(duration types.DurationVariant) *_searchInput { + + s.v.Timeout = *duration.DurationCaster() + + return s +} + +func (s *_searchInput) WatcherInputCaster() *types.WatcherInput { + container := types.NewWatcherInput() + + container.Search = s.v + + return container +} + +func (s *_searchInput) SearchInputCaster() *types.SearchInput { + return s.v +} diff --git a/typedapi/esdsl/searchinputrequestbody.go b/typedapi/esdsl/searchinputrequestbody.go new file mode 100644 index 0000000000..1df918987c --- /dev/null +++ b/typedapi/esdsl/searchinputrequestbody.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchInputRequestBody struct { + v *types.SearchInputRequestBody +} + +func NewSearchInputRequestBody(query types.QueryVariant) *_searchInputRequestBody { + + tmp := &_searchInputRequestBody{v: types.NewSearchInputRequestBody()} + + tmp.Query(query) + + return tmp + +} + +func (s *_searchInputRequestBody) Query(query types.QueryVariant) *_searchInputRequestBody { + + s.v.Query = *query.QueryCaster() + + return s +} + +func (s *_searchInputRequestBody) SearchInputRequestBodyCaster() *types.SearchInputRequestBody { + return s.v +} diff --git a/typedapi/esdsl/searchinputrequestdefinition.go b/typedapi/esdsl/searchinputrequestdefinition.go new file mode 100644 index 0000000000..f4205a51a1 --- /dev/null +++ b/typedapi/esdsl/searchinputrequestdefinition.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +type _searchInputRequestDefinition struct { + v *types.SearchInputRequestDefinition +} + +func NewSearchInputRequestDefinition() *_searchInputRequestDefinition { + + return &_searchInputRequestDefinition{v: types.NewSearchInputRequestDefinition()} + +} + +func (s *_searchInputRequestDefinition) Body(body types.SearchInputRequestBodyVariant) *_searchInputRequestDefinition { + + s.v.Body = body.SearchInputRequestBodyCaster() + + return s +} + +func (s *_searchInputRequestDefinition) Indices(indices ...string) *_searchInputRequestDefinition { + + for _, v := range indices { + + s.v.Indices = append(s.v.Indices, v) + + } + return s +} + +func (s *_searchInputRequestDefinition) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *_searchInputRequestDefinition { + + s.v.IndicesOptions = indicesoptions.IndicesOptionsCaster() + + return s +} + +func (s *_searchInputRequestDefinition) RestTotalHitsAsInt(resttotalhitsasint bool) *_searchInputRequestDefinition { + + s.v.RestTotalHitsAsInt = &resttotalhitsasint + + return s +} + +func (s *_searchInputRequestDefinition) SearchType(searchtype searchtype.SearchType) *_searchInputRequestDefinition { + + s.v.SearchType = &searchtype + return s +} + +func (s 
*_searchInputRequestDefinition) Template(template types.SearchTemplateRequestBodyVariant) *_searchInputRequestDefinition { + + s.v.Template = template.SearchTemplateRequestBodyCaster() + + return s +} + +func (s *_searchInputRequestDefinition) SearchInputRequestDefinitionCaster() *types.SearchInputRequestDefinition { + return s.v +} diff --git a/typedapi/esdsl/searchtemplaterequestbody.go b/typedapi/esdsl/searchtemplaterequestbody.go new file mode 100644 index 0000000000..1e09d81697 --- /dev/null +++ b/typedapi/esdsl/searchtemplaterequestbody.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _searchTemplateRequestBody struct { + v *types.SearchTemplateRequestBody +} + +func NewSearchTemplateRequestBody() *_searchTemplateRequestBody { + + return &_searchTemplateRequestBody{v: types.NewSearchTemplateRequestBody()} + +} + +func (s *_searchTemplateRequestBody) Explain(explain bool) *_searchTemplateRequestBody { + + s.v.Explain = &explain + + return s +} + +// ID of the search template to use. If no source is specified, +// this parameter is required. +func (s *_searchTemplateRequestBody) Id(id string) *_searchTemplateRequestBody { + + s.v.Id = &id + + return s +} + +func (s *_searchTemplateRequestBody) Params(params map[string]json.RawMessage) *_searchTemplateRequestBody { + + s.v.Params = params + return s +} + +func (s *_searchTemplateRequestBody) AddParam(key string, value json.RawMessage) *_searchTemplateRequestBody { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_searchTemplateRequestBody) Profile(profile bool) *_searchTemplateRequestBody { + + s.v.Profile = &profile + + return s +} + +// An inline search template. Supports the same parameters as the search API's +// request body. Also supports Mustache variables. If no id is specified, this +// parameter is required. 
+func (s *_searchTemplateRequestBody) Source(source string) *_searchTemplateRequestBody { + + s.v.Source = &source + + return s +} + +func (s *_searchTemplateRequestBody) SearchTemplateRequestBodyCaster() *types.SearchTemplateRequestBody { + return s.v +} diff --git a/typedapi/esdsl/searchtransform.go b/typedapi/esdsl/searchtransform.go new file mode 100644 index 0000000000..467970be90 --- /dev/null +++ b/typedapi/esdsl/searchtransform.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _searchTransform struct { + v *types.SearchTransform +} + +func NewSearchTransform(request types.SearchInputRequestDefinitionVariant) *_searchTransform { + + tmp := &_searchTransform{v: types.NewSearchTransform()} + + tmp.Request(request) + + return tmp + +} + +func (s *_searchTransform) Request(request types.SearchInputRequestDefinitionVariant) *_searchTransform { + + s.v.Request = *request.SearchInputRequestDefinitionCaster() + + return s +} + +func (s *_searchTransform) Timeout(duration types.DurationVariant) *_searchTransform { + + s.v.Timeout = *duration.DurationCaster() + + return s +} + +func (s *_searchTransform) TransformContainerCaster() *types.TransformContainer { + container := types.NewTransformContainer() + + container.Search = s.v + + return container +} + +func (s *_searchTransform) SearchTransformCaster() *types.SearchTransform { + return s.v +} diff --git a/typedapi/esdsl/securitysettings.go b/typedapi/esdsl/securitysettings.go new file mode 100644 index 0000000000..8bbf3e89d2 --- /dev/null +++ b/typedapi/esdsl/securitysettings.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _securitySettings struct { + v *types.SecuritySettings +} + +func NewSecuritySettings() *_securitySettings { + + return &_securitySettings{v: types.NewSecuritySettings()} + +} + +func (s *_securitySettings) Index(index types.IndexSettingsVariant) *_securitySettings { + + s.v.Index = index.IndexSettingsCaster() + + return s +} + +func (s *_securitySettings) SecuritySettingsCaster() *types.SecuritySettings { + return s.v +} diff --git a/typedapi/esdsl/selectoption.go b/typedapi/esdsl/selectoption.go new file mode 100644 index 0000000000..dee5737dc2 --- /dev/null +++ b/typedapi/esdsl/selectoption.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _selectOption struct { + v *types.SelectOption +} + +func NewSelectOption(label string) *_selectOption { + + tmp := &_selectOption{v: types.NewSelectOption()} + + tmp.Label(label) + + return tmp + +} + +func (s *_selectOption) Label(label string) *_selectOption { + + s.v.Label = label + + return s +} + +func (s *_selectOption) Value(scalarvalue types.ScalarValueVariant) *_selectOption { + + s.v.Value = *scalarvalue.ScalarValueCaster() + + return s +} + +func (s *_selectOption) SelectOptionCaster() *types.SelectOption { + return s.v +} diff --git a/typedapi/esdsl/semanticquery.go b/typedapi/esdsl/semanticquery.go new file mode 100644 index 0000000000..309a58ae8d --- /dev/null +++ b/typedapi/esdsl/semanticquery.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _semanticQuery struct { + v *types.SemanticQuery +} + +// A semantic query to semantic_text field types +func NewSemanticQuery(field string, query string) *_semanticQuery { + + tmp := &_semanticQuery{v: types.NewSemanticQuery()} + + tmp.Field(field) + + tmp.Query(query) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_semanticQuery) Boost(boost float32) *_semanticQuery { + + s.v.Boost = &boost + + return s +} + +// The field to query, which must be a semantic_text field type +func (s *_semanticQuery) Field(field string) *_semanticQuery { + + s.v.Field = field + + return s +} + +// The query text +func (s *_semanticQuery) Query(query string) *_semanticQuery { + + s.v.Query = query + + return s +} + +func (s *_semanticQuery) QueryName_(queryname_ string) *_semanticQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_semanticQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Semantic = s.v + + return container +} + +func (s *_semanticQuery) SemanticQueryCaster() *types.SemanticQuery { + return s.v +} diff --git a/typedapi/esdsl/semantictextproperty.go b/typedapi/esdsl/semantictextproperty.go new file mode 100644 index 0000000000..de44f87d1f --- /dev/null +++ b/typedapi/esdsl/semantictextproperty.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _semanticTextProperty struct { + v *types.SemanticTextProperty +} + +func NewSemanticTextProperty() *_semanticTextProperty { + + return &_semanticTextProperty{v: types.NewSemanticTextProperty()} + +} + +// Inference endpoint that will be used to generate embeddings for the field. +// This parameter cannot be updated. Use the Create inference API to create the +// endpoint. +// If `search_inference_id` is specified, the inference endpoint will only be +// used at index time. +func (s *_semanticTextProperty) InferenceId(id string) *_semanticTextProperty { + + s.v.InferenceId = &id + + return s +} + +func (s *_semanticTextProperty) Meta(meta map[string]string) *_semanticTextProperty { + + s.v.Meta = meta + return s +} + +func (s *_semanticTextProperty) AddMeta(key string, value string) *_semanticTextProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +// Inference endpoint that will be used to generate embeddings at query time. +// You can update this parameter by using the Update mapping API. 
Use the Create +// inference API to create the endpoint. +// If not specified, the inference endpoint defined by inference_id will be used +// at both index and query time. +func (s *_semanticTextProperty) SearchInferenceId(id string) *_semanticTextProperty { + + s.v.SearchInferenceId = &id + + return s +} + +func (s *_semanticTextProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_semanticTextProperty) SemanticTextPropertyCaster() *types.SemanticTextProperty { + return s.v +} diff --git a/typedapi/esdsl/serbiananalyzer.go b/typedapi/esdsl/serbiananalyzer.go new file mode 100644 index 0000000000..b1d3a07915 --- /dev/null +++ b/typedapi/esdsl/serbiananalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _serbianAnalyzer struct { + v *types.SerbianAnalyzer +} + +func NewSerbianAnalyzer() *_serbianAnalyzer { + + return &_serbianAnalyzer{v: types.NewSerbianAnalyzer()} + +} + +func (s *_serbianAnalyzer) StemExclusion(stemexclusions ...string) *_serbianAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_serbianAnalyzer) Stopwords(stopwords ...string) *_serbianAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_serbianAnalyzer) StopwordsPath(stopwordspath string) *_serbianAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_serbianAnalyzer) SerbianAnalyzerCaster() *types.SerbianAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/serialdifferencingaggregation.go b/typedapi/esdsl/serialdifferencingaggregation.go new file mode 100644 index 0000000000..6a97aec0bf --- /dev/null +++ b/typedapi/esdsl/serialdifferencingaggregation.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _serialDifferencingAggregation struct { + v *types.SerialDifferencingAggregation +} + +// An aggregation that subtracts values in a time series from themselves at +// different time lags or periods. +func NewSerialDifferencingAggregation() *_serialDifferencingAggregation { + + return &_serialDifferencingAggregation{v: types.NewSerialDifferencingAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_serialDifferencingAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_serialDifferencingAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_serialDifferencingAggregation) Format(format string) *_serialDifferencingAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_serialDifferencingAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_serialDifferencingAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +// The historical bucket to subtract from the current value. +// Must be a positive, non-zero integer. 
+func (s *_serialDifferencingAggregation) Lag(lag int) *_serialDifferencingAggregation { + + s.v.Lag = &lag + + return s +} + +func (s *_serialDifferencingAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.SerialDiff = s.v + + return container +} + +func (s *_serialDifferencingAggregation) SerialDifferencingAggregationCaster() *types.SerialDifferencingAggregation { + return s.v +} diff --git a/typedapi/esdsl/setpriorityaction.go b/typedapi/esdsl/setpriorityaction.go new file mode 100644 index 0000000000..4645a13eae --- /dev/null +++ b/typedapi/esdsl/setpriorityaction.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _setPriorityAction struct { + v *types.SetPriorityAction +} + +func NewSetPriorityAction() *_setPriorityAction { + + return &_setPriorityAction{v: types.NewSetPriorityAction()} + +} + +func (s *_setPriorityAction) Priority(priority int) *_setPriorityAction { + + s.v.Priority = &priority + + return s +} + +func (s *_setPriorityAction) SetPriorityActionCaster() *types.SetPriorityAction { + return s.v +} diff --git a/typedapi/esdsl/setprocessor.go b/typedapi/esdsl/setprocessor.go new file mode 100644 index 0000000000..15d7790f96 --- /dev/null +++ b/typedapi/esdsl/setprocessor.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _setProcessor struct { + v *types.SetProcessor +} + +// Adds a field with the specified value. 
+// If the field already exists, its value will be replaced with the provided +// one. +func NewSetProcessor() *_setProcessor { + + return &_setProcessor{v: types.NewSetProcessor()} + +} + +// The origin field which will be copied to `field`, cannot set `value` +// simultaneously. +// Supported data types are `boolean`, `number`, `array`, `object`, `string`, +// `date`, etc. +func (s *_setProcessor) CopyFrom(field string) *_setProcessor { + + s.v.CopyFrom = &field + + return s +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_setProcessor) Description(description string) *_setProcessor { + + s.v.Description = &description + + return s +} + +// The field to insert, upsert, or update. +// Supports template snippets. +func (s *_setProcessor) Field(field string) *_setProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_setProcessor) If(if_ types.ScriptVariant) *_setProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// If `true` and `value` is a template snippet that evaluates to `null` or the +// empty string, the processor quietly exits without modifying the document. +func (s *_setProcessor) IgnoreEmptyValue(ignoreemptyvalue bool) *_setProcessor { + + s.v.IgnoreEmptyValue = &ignoreemptyvalue + + return s +} + +// Ignore failures for the processor. +func (s *_setProcessor) IgnoreFailure(ignorefailure bool) *_setProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// The media type for encoding `value`. +// Applies only when value is a template snippet. +// Must be one of `application/json`, `text/plain`, or +// `application/x-www-form-urlencoded`. +func (s *_setProcessor) MediaType(mediatype string) *_setProcessor { + + s.v.MediaType = &mediatype + + return s +} + +// Handle failures for the processor. 
+func (s *_setProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_setProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// If `true` processor will update fields with pre-existing non-null-valued +// field. +// When set to `false`, such fields will not be touched. +func (s *_setProcessor) Override(override bool) *_setProcessor { + + s.v.Override = &override + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_setProcessor) Tag(tag string) *_setProcessor { + + s.v.Tag = &tag + + return s +} + +// The value to be set for the field. +// Supports template snippets. +// May specify only one of `value` or `copy_from`. +func (s *_setProcessor) Value(value json.RawMessage) *_setProcessor { + + s.v.Value = value + + return s +} + +func (s *_setProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Set = s.v + + return container +} + +func (s *_setProcessor) SetProcessorCaster() *types.SetProcessor { + return s.v +} diff --git a/typedapi/esdsl/setsecurityuserprocessor.go b/typedapi/esdsl/setsecurityuserprocessor.go new file mode 100644 index 0000000000..ec7a92f5af --- /dev/null +++ b/typedapi/esdsl/setsecurityuserprocessor.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _setSecurityUserProcessor struct { + v *types.SetSecurityUserProcessor +} + +// Sets user-related details (such as `username`, `roles`, `email`, `full_name`, +// `metadata`, `api_key`, `realm` and `authentication_type`) from the current +// authenticated user to the current document by pre-processing the ingest. +func NewSetSecurityUserProcessor() *_setSecurityUserProcessor { + + return &_setSecurityUserProcessor{v: types.NewSetSecurityUserProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_setSecurityUserProcessor) Description(description string) *_setSecurityUserProcessor { + + s.v.Description = &description + + return s +} + +// The field to store the user information into. +func (s *_setSecurityUserProcessor) Field(field string) *_setSecurityUserProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_setSecurityUserProcessor) If(if_ types.ScriptVariant) *_setSecurityUserProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_setSecurityUserProcessor) IgnoreFailure(ignorefailure bool) *_setSecurityUserProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_setSecurityUserProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_setSecurityUserProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Controls what user related properties are added to the field. +func (s *_setSecurityUserProcessor) Properties(properties ...string) *_setSecurityUserProcessor { + + for _, v := range properties { + + s.v.Properties = append(s.v.Properties, v) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_setSecurityUserProcessor) Tag(tag string) *_setSecurityUserProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_setSecurityUserProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.SetSecurityUser = s.v + + return container +} + +func (s *_setSecurityUserProcessor) SetSecurityUserProcessorCaster() *types.SetSecurityUserProcessor { + return s.v +} diff --git a/typedapi/esdsl/settings.go b/typedapi/esdsl/settings.go new file mode 100644 index 0000000000..365ec149af --- /dev/null +++ b/typedapi/esdsl/settings.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settings struct { + v *types.Settings +} + +func NewSettings() *_settings { + + return &_settings{v: types.NewSettings()} + +} + +// Specifies whether the transform checkpoint ranges should be optimized for +// performance. Such optimization can align +// checkpoint ranges with the date histogram interval when date histogram is +// specified as a group source in the +// transform config. As a result, fewer document updates in the destination index +// will be performed thus improving +// overall performance. +func (s *_settings) AlignCheckpoints(aligncheckpoints bool) *_settings { + + s.v.AlignCheckpoints = &aligncheckpoints + + return s +} + +// Defines if dates in the output should be written as ISO formatted string or as +// millis since epoch. epoch_millis was +// the default for transforms created before version 7.11. For compatible output +// set this value to `true`. +func (s *_settings) DatesAsEpochMillis(datesasepochmillis bool) *_settings { + + s.v.DatesAsEpochMillis = &datesasepochmillis + + return s +} + +// Specifies whether the transform should deduce the destination index mappings +// from the transform configuration.
+func (s *_settings) DeduceMappings(deducemappings bool) *_settings { + + s.v.DeduceMappings = &deducemappings + + return s +} + +// Specifies a limit on the number of input documents per second. This setting +// throttles the transform by adding a +// wait time between search requests. The default value is null, which disables +// throttling. +func (s *_settings) DocsPerSecond(docspersecond float32) *_settings { + + s.v.DocsPerSecond = &docspersecond + + return s +} + +// Defines the initial page size to use for the composite aggregation for each +// checkpoint. If circuit breaker +// exceptions occur, the page size is dynamically adjusted to a lower value. The +// minimum value is `10` and the +// maximum is `65,536`. +func (s *_settings) MaxPageSearchSize(maxpagesearchsize int) *_settings { + + s.v.MaxPageSearchSize = &maxpagesearchsize + + return s +} + +// If `true`, the transform runs in unattended mode. In unattended mode, the +// transform retries indefinitely in case +// of an error which means the transform never fails. Setting the number of +// retries other than infinite fails in +// validation. +func (s *_settings) Unattended(unattended bool) *_settings { + + s.v.Unattended = &unattended + + return s +} + +func (s *_settings) SettingsCaster() *types.Settings { + return s.v +} diff --git a/typedapi/esdsl/settingsanalyze.go b/typedapi/esdsl/settingsanalyze.go new file mode 100644 index 0000000000..be18961277 --- /dev/null +++ b/typedapi/esdsl/settingsanalyze.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsAnalyze struct { + v *types.SettingsAnalyze +} + +func NewSettingsAnalyze() *_settingsAnalyze { + + return &_settingsAnalyze{v: types.NewSettingsAnalyze()} + +} + +func (s *_settingsAnalyze) MaxTokenCount(stringifiedinteger types.StringifiedintegerVariant) *_settingsAnalyze { + + s.v.MaxTokenCount = *stringifiedinteger.StringifiedintegerCaster() + + return s +} + +func (s *_settingsAnalyze) SettingsAnalyzeCaster() *types.SettingsAnalyze { + return s.v +} diff --git a/typedapi/esdsl/settingshighlight.go b/typedapi/esdsl/settingshighlight.go new file mode 100644 index 0000000000..707f4a75da --- /dev/null +++ b/typedapi/esdsl/settingshighlight.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsHighlight struct { + v *types.SettingsHighlight +} + +func NewSettingsHighlight() *_settingsHighlight { + + return &_settingsHighlight{v: types.NewSettingsHighlight()} + +} + +func (s *_settingsHighlight) MaxAnalyzedOffset(maxanalyzedoffset int) *_settingsHighlight { + + s.v.MaxAnalyzedOffset = &maxanalyzedoffset + + return s +} + +func (s *_settingsHighlight) SettingsHighlightCaster() *types.SettingsHighlight { + return s.v +} diff --git a/typedapi/esdsl/settingsquerystring.go b/typedapi/esdsl/settingsquerystring.go new file mode 100644 index 0000000000..3c64a611ae --- /dev/null +++ b/typedapi/esdsl/settingsquerystring.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsQueryString struct { + v *types.SettingsQueryString +} + +func NewSettingsQueryString() *_settingsQueryString { + + return &_settingsQueryString{v: types.NewSettingsQueryString()} + +} + +func (s *_settingsQueryString) Lenient(stringifiedboolean types.StringifiedbooleanVariant) *_settingsQueryString { + + s.v.Lenient = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_settingsQueryString) SettingsQueryStringCaster() *types.SettingsQueryString { + return s.v +} diff --git a/typedapi/esdsl/settingssearch.go b/typedapi/esdsl/settingssearch.go new file mode 100644 index 0000000000..47a60311b2 --- /dev/null +++ b/typedapi/esdsl/settingssearch.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSearch struct { + v *types.SettingsSearch +} + +func NewSettingsSearch() *_settingsSearch { + + return &_settingsSearch{v: types.NewSettingsSearch()} + +} + +func (s *_settingsSearch) Idle(idle types.SearchIdleVariant) *_settingsSearch { + + s.v.Idle = idle.SearchIdleCaster() + + return s +} + +func (s *_settingsSearch) Slowlog(slowlog types.SlowlogSettingsVariant) *_settingsSearch { + + s.v.Slowlog = slowlog.SlowlogSettingsCaster() + + return s +} + +func (s *_settingsSearch) SettingsSearchCaster() *types.SettingsSearch { + return s.v +} diff --git a/typedapi/esdsl/settingssimilarity.go b/typedapi/esdsl/settingssimilarity.go new file mode 100644 index 0000000000..9fd09aa608 --- /dev/null +++ b/typedapi/esdsl/settingssimilarity.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _settingsSimilarity struct { + v types.SettingsSimilarity +} + +func NewSettingsSimilarity() *_settingsSimilarity { + return &_settingsSimilarity{v: nil} +} + +func (u *_settingsSimilarity) SettingsSimilarityBm25(settingssimilaritybm25 types.SettingsSimilarityBm25Variant) *_settingsSimilarity { + + u.v = &settingssimilaritybm25 + + return u +} + +// Interface implementation for SettingsSimilarityBm25 in SettingsSimilarity union +func (u *_settingsSimilarityBm25) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityBoolean(settingssimilarityboolean types.SettingsSimilarityBooleanVariant) *_settingsSimilarity { + + u.v = &settingssimilarityboolean + + return u +} + +// Interface implementation for SettingsSimilarityBoolean in SettingsSimilarity union +func (u *_settingsSimilarityBoolean) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityDfi(settingssimilaritydfi types.SettingsSimilarityDfiVariant) *_settingsSimilarity { + + u.v = &settingssimilaritydfi + + return u +} + +// Interface implementation for 
SettingsSimilarityDfi in SettingsSimilarity union +func (u *_settingsSimilarityDfi) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityDfr(settingssimilaritydfr types.SettingsSimilarityDfrVariant) *_settingsSimilarity { + + u.v = &settingssimilaritydfr + + return u +} + +// Interface implementation for SettingsSimilarityDfr in SettingsSimilarity union +func (u *_settingsSimilarityDfr) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityIb(settingssimilarityib types.SettingsSimilarityIbVariant) *_settingsSimilarity { + + u.v = &settingssimilarityib + + return u +} + +// Interface implementation for SettingsSimilarityIb in SettingsSimilarity union +func (u *_settingsSimilarityIb) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityLmd(settingssimilaritylmd types.SettingsSimilarityLmdVariant) *_settingsSimilarity { + + u.v = &settingssimilaritylmd + + return u +} + +// Interface implementation for SettingsSimilarityLmd in SettingsSimilarity union +func (u *_settingsSimilarityLmd) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityLmj(settingssimilaritylmj types.SettingsSimilarityLmjVariant) *_settingsSimilarity { + + u.v = &settingssimilaritylmj + + return u +} + +// Interface implementation for SettingsSimilarityLmj in SettingsSimilarity union +func (u *_settingsSimilarityLmj) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityScripted(settingssimilarityscripted types.SettingsSimilarityScriptedVariant) *_settingsSimilarity { + + u.v = 
&settingssimilarityscripted + + return u +} + +// Interface implementation for SettingsSimilarityScripted in SettingsSimilarity union +func (u *_settingsSimilarityScripted) SettingsSimilarityCaster() *types.SettingsSimilarity { + t := types.SettingsSimilarity(u.v) + return &t +} + +func (u *_settingsSimilarity) SettingsSimilarityCaster() *types.SettingsSimilarity { + return &u.v +} diff --git a/typedapi/esdsl/settingssimilaritybm25.go b/typedapi/esdsl/settingssimilaritybm25.go new file mode 100644 index 0000000000..ab354f9b5a --- /dev/null +++ b/typedapi/esdsl/settingssimilaritybm25.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSimilarityBm25 struct { + v *types.SettingsSimilarityBm25 +} + +func NewSettingsSimilarityBm25() *_settingsSimilarityBm25 { + + return &_settingsSimilarityBm25{v: types.NewSettingsSimilarityBm25()} + +} + +func (s *_settingsSimilarityBm25) B(b types.Float64) *_settingsSimilarityBm25 { + + s.v.B = &b + + return s +} + +func (s *_settingsSimilarityBm25) DiscountOverlaps(discountoverlaps bool) *_settingsSimilarityBm25 { + + s.v.DiscountOverlaps = &discountoverlaps + + return s +} + +func (s *_settingsSimilarityBm25) K1(k1 types.Float64) *_settingsSimilarityBm25 { + + s.v.K1 = &k1 + + return s +} + +func (s *_settingsSimilarityBm25) SettingsSimilarityBm25Caster() *types.SettingsSimilarityBm25 { + return s.v +} diff --git a/typedapi/esdsl/settingssimilarityboolean.go b/typedapi/esdsl/settingssimilarityboolean.go new file mode 100644 index 0000000000..8162644f3d --- /dev/null +++ b/typedapi/esdsl/settingssimilarityboolean.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSimilarityBoolean struct { + v *types.SettingsSimilarityBoolean +} + +func NewSettingsSimilarityBoolean() *_settingsSimilarityBoolean { + + return &_settingsSimilarityBoolean{v: types.NewSettingsSimilarityBoolean()} + +} + +func (s *_settingsSimilarityBoolean) SettingsSimilarityBooleanCaster() *types.SettingsSimilarityBoolean { + return s.v +} diff --git a/typedapi/esdsl/settingssimilaritydfi.go b/typedapi/esdsl/settingssimilaritydfi.go new file mode 100644 index 0000000000..c8b874b962 --- /dev/null +++ b/typedapi/esdsl/settingssimilaritydfi.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure" +) + +type _settingsSimilarityDfi struct { + v *types.SettingsSimilarityDfi +} + +func NewSettingsSimilarityDfi(independencemeasure dfiindependencemeasure.DFIIndependenceMeasure) *_settingsSimilarityDfi { + + tmp := &_settingsSimilarityDfi{v: types.NewSettingsSimilarityDfi()} + + tmp.IndependenceMeasure(independencemeasure) + + return tmp + +} + +func (s *_settingsSimilarityDfi) IndependenceMeasure(independencemeasure dfiindependencemeasure.DFIIndependenceMeasure) *_settingsSimilarityDfi { + + s.v.IndependenceMeasure = independencemeasure + return s +} + +func (s *_settingsSimilarityDfi) SettingsSimilarityDfiCaster() *types.SettingsSimilarityDfi { + return s.v +} diff --git a/typedapi/esdsl/settingssimilaritydfr.go b/typedapi/esdsl/settingssimilaritydfr.go new file mode 100644 index 0000000000..de7fd2d0ea --- /dev/null +++ b/typedapi/esdsl/settingssimilaritydfr.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization" +) + +type _settingsSimilarityDfr struct { + v *types.SettingsSimilarityDfr +} + +func NewSettingsSimilarityDfr(aftereffect dfraftereffect.DFRAfterEffect, basicmodel dfrbasicmodel.DFRBasicModel, normalization normalization.Normalization) *_settingsSimilarityDfr { + + tmp := &_settingsSimilarityDfr{v: types.NewSettingsSimilarityDfr()} + + tmp.AfterEffect(aftereffect) + + tmp.BasicModel(basicmodel) + + tmp.Normalization(normalization) + + return tmp + +} + +func (s *_settingsSimilarityDfr) AfterEffect(aftereffect dfraftereffect.DFRAfterEffect) *_settingsSimilarityDfr { + + s.v.AfterEffect = aftereffect + return s +} + +func (s *_settingsSimilarityDfr) BasicModel(basicmodel dfrbasicmodel.DFRBasicModel) *_settingsSimilarityDfr { + + s.v.BasicModel = basicmodel + return s +} + +func (s *_settingsSimilarityDfr) Normalization(normalization normalization.Normalization) *_settingsSimilarityDfr { + + s.v.Normalization = normalization + return s +} + +func (s *_settingsSimilarityDfr) SettingsSimilarityDfrCaster() *types.SettingsSimilarityDfr { + return s.v +} diff --git a/typedapi/esdsl/settingssimilarityib.go b/typedapi/esdsl/settingssimilarityib.go new file mode 100644 index 0000000000..a49ffc7d73 --- /dev/null +++ b/typedapi/esdsl/settingssimilarityib.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization" +) + +type _settingsSimilarityIb struct { + v *types.SettingsSimilarityIb +} + +func NewSettingsSimilarityIb(distribution ibdistribution.IBDistribution, lambda iblambda.IBLambda, normalization normalization.Normalization) *_settingsSimilarityIb { + + tmp := &_settingsSimilarityIb{v: types.NewSettingsSimilarityIb()} + + tmp.Distribution(distribution) + + tmp.Lambda(lambda) + + tmp.Normalization(normalization) + + return tmp + +} + +func (s *_settingsSimilarityIb) Distribution(distribution ibdistribution.IBDistribution) *_settingsSimilarityIb { + + s.v.Distribution = distribution + return s +} + +func (s *_settingsSimilarityIb) Lambda(lambda iblambda.IBLambda) *_settingsSimilarityIb { + + s.v.Lambda = lambda + return s +} + +func (s *_settingsSimilarityIb) Normalization(normalization normalization.Normalization) *_settingsSimilarityIb { + + s.v.Normalization = normalization + return s +} + 
+func (s *_settingsSimilarityIb) SettingsSimilarityIbCaster() *types.SettingsSimilarityIb { + return s.v +} diff --git a/typedapi/esdsl/settingssimilaritylmd.go b/typedapi/esdsl/settingssimilaritylmd.go new file mode 100644 index 0000000000..c0e081f2bf --- /dev/null +++ b/typedapi/esdsl/settingssimilaritylmd.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSimilarityLmd struct { + v *types.SettingsSimilarityLmd +} + +func NewSettingsSimilarityLmd() *_settingsSimilarityLmd { + + return &_settingsSimilarityLmd{v: types.NewSettingsSimilarityLmd()} + +} + +func (s *_settingsSimilarityLmd) Mu(mu types.Float64) *_settingsSimilarityLmd { + + s.v.Mu = &mu + + return s +} + +func (s *_settingsSimilarityLmd) SettingsSimilarityLmdCaster() *types.SettingsSimilarityLmd { + return s.v +} diff --git a/typedapi/esdsl/settingssimilaritylmj.go b/typedapi/esdsl/settingssimilaritylmj.go new file mode 100644 index 0000000000..b8f99e0b91 --- /dev/null +++ b/typedapi/esdsl/settingssimilaritylmj.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSimilarityLmj struct { + v *types.SettingsSimilarityLmj +} + +func NewSettingsSimilarityLmj() *_settingsSimilarityLmj { + + return &_settingsSimilarityLmj{v: types.NewSettingsSimilarityLmj()} + +} + +func (s *_settingsSimilarityLmj) Lambda(lambda types.Float64) *_settingsSimilarityLmj { + + s.v.Lambda = &lambda + + return s +} + +func (s *_settingsSimilarityLmj) SettingsSimilarityLmjCaster() *types.SettingsSimilarityLmj { + return s.v +} diff --git a/typedapi/esdsl/settingssimilarityscripted.go b/typedapi/esdsl/settingssimilarityscripted.go new file mode 100644 index 0000000000..8f5a1d63dc --- /dev/null +++ b/typedapi/esdsl/settingssimilarityscripted.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _settingsSimilarityScripted struct { + v *types.SettingsSimilarityScripted +} + +func NewSettingsSimilarityScripted(script types.ScriptVariant) *_settingsSimilarityScripted { + + tmp := &_settingsSimilarityScripted{v: types.NewSettingsSimilarityScripted()} + + tmp.Script(script) + + return tmp + +} + +func (s *_settingsSimilarityScripted) Script(script types.ScriptVariant) *_settingsSimilarityScripted { + + s.v.Script = *script.ScriptCaster() + + return s +} + +func (s *_settingsSimilarityScripted) WeightScript(weightscript types.ScriptVariant) *_settingsSimilarityScripted { + + s.v.WeightScript = weightscript.ScriptCaster() + + return s +} + +func (s *_settingsSimilarityScripted) SettingsSimilarityScriptedCaster() *types.SettingsSimilarityScripted { + return s.v +} diff --git a/typedapi/esdsl/shapefieldquery.go b/typedapi/esdsl/shapefieldquery.go new file mode 100644 index 0000000000..816a5bd2f6 --- /dev/null +++ b/typedapi/esdsl/shapefieldquery.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" +) + +type _shapeFieldQuery struct { + v *types.ShapeFieldQuery +} + +func NewShapeFieldQuery() *_shapeFieldQuery { + + return &_shapeFieldQuery{v: types.NewShapeFieldQuery()} + +} + +// Queries using a pre-indexed shape. +func (s *_shapeFieldQuery) IndexedShape(indexedshape types.FieldLookupVariant) *_shapeFieldQuery { + + s.v.IndexedShape = indexedshape.FieldLookupCaster() + + return s +} + +// Spatial relation between the query shape and the document shape. +func (s *_shapeFieldQuery) Relation(relation geoshaperelation.GeoShapeRelation) *_shapeFieldQuery { + + s.v.Relation = &relation + return s +} + +// Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) +// format. +func (s *_shapeFieldQuery) Shape(geoshape json.RawMessage) *_shapeFieldQuery { + + s.v.Shape = geoshape + + return s +} + +func (s *_shapeFieldQuery) ShapeFieldQueryCaster() *types.ShapeFieldQuery { + return s.v +} diff --git a/typedapi/esdsl/shapeproperty.go b/typedapi/esdsl/shapeproperty.go new file mode 100644 index 0000000000..f1faa96bd5 --- /dev/null +++ b/typedapi/esdsl/shapeproperty.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _shapeProperty struct { + v *types.ShapeProperty +} + +func NewShapeProperty() *_shapeProperty { + + return &_shapeProperty{v: types.NewShapeProperty()} + +} + +func (s *_shapeProperty) Coerce(coerce bool) *_shapeProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_shapeProperty) CopyTo(fields ...string) *_shapeProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_shapeProperty) DocValues(docvalues bool) *_shapeProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_shapeProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_shapeProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_shapeProperty) Fields(fields map[string]types.Property) *_shapeProperty { + + s.v.Fields = fields + return s +} + +func (s *_shapeProperty) AddField(key string, value types.PropertyVariant) *_shapeProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = 
tmp + return s +} + +func (s *_shapeProperty) IgnoreAbove(ignoreabove int) *_shapeProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_shapeProperty) IgnoreMalformed(ignoremalformed bool) *_shapeProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_shapeProperty) IgnoreZValue(ignorezvalue bool) *_shapeProperty { + + s.v.IgnoreZValue = &ignorezvalue + + return s +} + +// Metadata about the field. +func (s *_shapeProperty) Meta(meta map[string]string) *_shapeProperty { + + s.v.Meta = meta + return s +} + +func (s *_shapeProperty) AddMeta(key string, value string) *_shapeProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_shapeProperty) Orientation(orientation geoorientation.GeoOrientation) *_shapeProperty { + + s.v.Orientation = &orientation + return s +} + +func (s *_shapeProperty) Properties(properties map[string]types.Property) *_shapeProperty { + + s.v.Properties = properties + return s +} + +func (s *_shapeProperty) AddProperty(key string, value types.PropertyVariant) *_shapeProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_shapeProperty) Store(store bool) *_shapeProperty { + + s.v.Store = &store + + return s +} + +func (s *_shapeProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_shapeProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_shapeProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_shapeProperty) ShapePropertyCaster() *types.ShapeProperty { + return s.v +} 
diff --git a/typedapi/esdsl/shapequery.go b/typedapi/esdsl/shapequery.go new file mode 100644 index 0000000000..4fe99f65fa --- /dev/null +++ b/typedapi/esdsl/shapequery.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _shapeQuery struct { + v *types.ShapeQuery +} + +// Queries documents that contain fields indexed using the `shape` type. +func NewShapeQuery() *_shapeQuery { + + return &_shapeQuery{v: types.NewShapeQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_shapeQuery) Boost(boost float32) *_shapeQuery { + + s.v.Boost = &boost + + return s +} + +// When set to `true` the query ignores an unmapped field and will not match any +// documents. 
+func (s *_shapeQuery) IgnoreUnmapped(ignoreunmapped bool) *_shapeQuery { + + s.v.IgnoreUnmapped = &ignoreunmapped + + return s +} + +func (s *_shapeQuery) QueryName_(queryname_ string) *_shapeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_shapeQuery) ShapeQuery(shapequery map[string]types.ShapeFieldQuery) *_shapeQuery { + + s.v.ShapeQuery = shapequery + return s +} + +func (s *_shapeQuery) AddShapeQuery(key string, value types.ShapeFieldQueryVariant) *_shapeQuery { + + var tmp map[string]types.ShapeFieldQuery + if s.v.ShapeQuery == nil { + s.v.ShapeQuery = make(map[string]types.ShapeFieldQuery) + } else { + tmp = s.v.ShapeQuery + } + + tmp[key] = *value.ShapeFieldQueryCaster() + + s.v.ShapeQuery = tmp + return s +} + +func (s *_shapeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Shape = s.v + + return container +} + +func (s *_shapeQuery) ShapeQueryCaster() *types.ShapeQuery { + return s.v +} diff --git a/typedapi/esdsl/sharedfilesystemrepository.go b/typedapi/esdsl/sharedfilesystemrepository.go new file mode 100644 index 0000000000..c9dc56f4fd --- /dev/null +++ b/typedapi/esdsl/sharedfilesystemrepository.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sharedFileSystemRepository struct { + v *types.SharedFileSystemRepository +} + +func NewSharedFileSystemRepository(settings types.SharedFileSystemRepositorySettingsVariant) *_sharedFileSystemRepository { + + tmp := &_sharedFileSystemRepository{v: types.NewSharedFileSystemRepository()} + + tmp.Settings(settings) + + return tmp + +} + +// The repository settings. +func (s *_sharedFileSystemRepository) Settings(settings types.SharedFileSystemRepositorySettingsVariant) *_sharedFileSystemRepository { + + s.v.Settings = *settings.SharedFileSystemRepositorySettingsCaster() + + return s +} + +func (s *_sharedFileSystemRepository) Uuid(uuid string) *_sharedFileSystemRepository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_sharedFileSystemRepository) SharedFileSystemRepositoryCaster() *types.SharedFileSystemRepository { + return s.v +} diff --git a/typedapi/esdsl/sharedfilesystemrepositorysettings.go b/typedapi/esdsl/sharedfilesystemrepositorysettings.go new file mode 100644 index 0000000000..8460ab6279 --- /dev/null +++ b/typedapi/esdsl/sharedfilesystemrepositorysettings.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sharedFileSystemRepositorySettings struct { + v *types.SharedFileSystemRepositorySettings +} + +func NewSharedFileSystemRepositorySettings(location string) *_sharedFileSystemRepositorySettings { + + tmp := &_sharedFileSystemRepositorySettings{v: types.NewSharedFileSystemRepositorySettings()} + + tmp.Location(location) + + return tmp + +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. +// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. +func (s *_sharedFileSystemRepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_sharedFileSystemRepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// When set to `true`, metadata files are stored in compressed format. 
+// This setting doesn't affect index files that are already compressed by +// default. +func (s *_sharedFileSystemRepositorySettings) Compress(compress bool) *_sharedFileSystemRepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The location of the shared filesystem used to store and retrieve snapshots. +// This location must be registered in the `path.repo` setting on all master and +// data nodes in the cluster. +// Unlike `path.repo`, this setting supports only a single file path. +func (s *_sharedFileSystemRepositorySettings) Location(location string) *_sharedFileSystemRepositorySettings { + + s.v.Location = location + + return s +} + +// The maximum number of snapshots the repository can contain. +// The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. +func (s *_sharedFileSystemRepositorySettings) MaxNumberOfSnapshots(maxnumberofsnapshots int) *_sharedFileSystemRepositorySettings { + + s.v.MaxNumberOfSnapshots = &maxnumberofsnapshots + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_sharedFileSystemRepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_sharedFileSystemRepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_sharedFileSystemRepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_sharedFileSystemRepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// If `true`, the repository is read-only. 
+// The cluster can retrieve and restore snapshots from the repository but not +// write to the repository or create snapshots in it. +// +// Only a cluster with write access can create snapshots in the repository. +// All other clusters connected to the repository should have the `readonly` +// parameter set to `true`. +// +// If `false`, the cluster can write to the repository and create snapshots in +// it. +// +// IMPORTANT: If you register the same snapshot repository with multiple +// clusters, only one cluster should have write access to the repository. +// Having multiple clusters write to the repository at the same time risks +// corrupting the contents of the repository. +func (s *_sharedFileSystemRepositorySettings) Readonly(readonly bool) *_sharedFileSystemRepositorySettings { + + s.v.Readonly = &readonly + + return s +} + +func (s *_sharedFileSystemRepositorySettings) SharedFileSystemRepositorySettingsCaster() *types.SharedFileSystemRepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/shingletokenfilter.go b/typedapi/esdsl/shingletokenfilter.go new file mode 100644 index 0000000000..f192a0688e --- /dev/null +++ b/typedapi/esdsl/shingletokenfilter.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _shingleTokenFilter struct { + v *types.ShingleTokenFilter +} + +func NewShingleTokenFilter() *_shingleTokenFilter { + + return &_shingleTokenFilter{v: types.NewShingleTokenFilter()} + +} + +func (s *_shingleTokenFilter) FillerToken(fillertoken string) *_shingleTokenFilter { + + s.v.FillerToken = &fillertoken + + return s +} + +func (s *_shingleTokenFilter) MaxShingleSize(maxshinglesize string) *_shingleTokenFilter { + + s.v.MaxShingleSize = maxshinglesize + + return s +} + +func (s *_shingleTokenFilter) MinShingleSize(minshinglesize string) *_shingleTokenFilter { + + s.v.MinShingleSize = minshinglesize + + return s +} + +func (s *_shingleTokenFilter) OutputUnigrams(outputunigrams bool) *_shingleTokenFilter { + + s.v.OutputUnigrams = &outputunigrams + + return s +} + +func (s *_shingleTokenFilter) OutputUnigramsIfNoShingles(outputunigramsifnoshingles bool) *_shingleTokenFilter { + + s.v.OutputUnigramsIfNoShingles = &outputunigramsifnoshingles + + return s +} + +func (s *_shingleTokenFilter) TokenSeparator(tokenseparator string) *_shingleTokenFilter { + + s.v.TokenSeparator = &tokenseparator + + return s +} + +func (s *_shingleTokenFilter) Version(versionstring string) *_shingleTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_shingleTokenFilter) ShingleTokenFilterCaster() *types.ShingleTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/shortnumberproperty.go b/typedapi/esdsl/shortnumberproperty.go new file mode 100644 index 0000000000..232b7975ab --- /dev/null +++ b/typedapi/esdsl/shortnumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _shortNumberProperty struct { + v *types.ShortNumberProperty +} + +func NewShortNumberProperty() *_shortNumberProperty { + + return &_shortNumberProperty{v: types.NewShortNumberProperty()} + +} + +func (s *_shortNumberProperty) Boost(boost types.Float64) *_shortNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_shortNumberProperty) Coerce(coerce bool) *_shortNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_shortNumberProperty) CopyTo(fields ...string) *_shortNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_shortNumberProperty) DocValues(docvalues bool) 
*_shortNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_shortNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_shortNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_shortNumberProperty) Fields(fields map[string]types.Property) *_shortNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_shortNumberProperty) AddField(key string, value types.PropertyVariant) *_shortNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_shortNumberProperty) IgnoreAbove(ignoreabove int) *_shortNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_shortNumberProperty) IgnoreMalformed(ignoremalformed bool) *_shortNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_shortNumberProperty) Index(index bool) *_shortNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. 
+func (s *_shortNumberProperty) Meta(meta map[string]string) *_shortNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_shortNumberProperty) AddMeta(key string, value string) *_shortNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_shortNumberProperty) NullValue(nullvalue int) *_shortNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_shortNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_shortNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_shortNumberProperty) Properties(properties map[string]types.Property) *_shortNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_shortNumberProperty) AddProperty(key string, value types.PropertyVariant) *_shortNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_shortNumberProperty) Script(script types.ScriptVariant) *_shortNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_shortNumberProperty) Store(store bool) *_shortNumberProperty { + + s.v.Store = &store + + return s +} + +func (s *_shortNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_shortNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_shortNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_shortNumberProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +// For internal use by Elastic only. 
Marks the field as a time series metric. +// Defaults to false. +func (s *_shortNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_shortNumberProperty { + + s.v.TimeSeriesMetric = &timeseriesmetric + return s +} + +func (s *_shortNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_shortNumberProperty) ShortNumberPropertyCaster() *types.ShortNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/shrinkaction.go b/typedapi/esdsl/shrinkaction.go new file mode 100644 index 0000000000..290452b22b --- /dev/null +++ b/typedapi/esdsl/shrinkaction.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _shrinkAction struct { + v *types.ShrinkAction +} + +func NewShrinkAction() *_shrinkAction { + + return &_shrinkAction{v: types.NewShrinkAction()} + +} + +func (s *_shrinkAction) AllowWriteAfterShrink(allowwriteaftershrink bool) *_shrinkAction { + + s.v.AllowWriteAfterShrink = &allowwriteaftershrink + + return s +} + +func (s *_shrinkAction) MaxPrimaryShardSize(bytesize types.ByteSizeVariant) *_shrinkAction { + + s.v.MaxPrimaryShardSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_shrinkAction) NumberOfShards(numberofshards int) *_shrinkAction { + + s.v.NumberOfShards = &numberofshards + + return s +} + +func (s *_shrinkAction) ShrinkActionCaster() *types.ShrinkAction { + return s.v +} diff --git a/typedapi/esdsl/significanttermsaggregation.go b/typedapi/esdsl/significanttermsaggregation.go new file mode 100644 index 0000000000..54597e6ca1 --- /dev/null +++ b/typedapi/esdsl/significanttermsaggregation.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" +) + +type _significantTermsAggregation struct { + v *types.SignificantTermsAggregation +} + +// Returns interesting or unusual occurrences of terms in a set. +func NewSignificantTermsAggregation() *_significantTermsAggregation { + + return &_significantTermsAggregation{v: types.NewSignificantTermsAggregation()} + +} + +// A background filter that can be used to focus in on significant terms within +// a narrower context, instead of the entire index. +func (s *_significantTermsAggregation) BackgroundFilter(backgroundfilter types.QueryVariant) *_significantTermsAggregation { + + s.v.BackgroundFilter = backgroundfilter.QueryCaster() + + return s +} + +// Use Chi square, as described in "Information Retrieval", Manning et al., +// Chapter 13.5.2, as the significance score. +func (s *_significantTermsAggregation) ChiSquare(chisquare types.ChiSquareHeuristicVariant) *_significantTermsAggregation { + + s.v.ChiSquare = chisquare.ChiSquareHeuristicCaster() + + return s +} + +// Terms to exclude. +func (s *_significantTermsAggregation) Exclude(termsexcludes ...string) *_significantTermsAggregation { + + s.v.Exclude = termsexcludes + + return s +} + +// Mechanism by which the aggregation should be executed: using field values +// directly or using global ordinals. +func (s *_significantTermsAggregation) ExecutionHint(executionhint termsaggregationexecutionhint.TermsAggregationExecutionHint) *_significantTermsAggregation { + + s.v.ExecutionHint = &executionhint + return s +} + +// The field from which to return significant terms. 
+func (s *_significantTermsAggregation) Field(field string) *_significantTermsAggregation { + + s.v.Field = &field + + return s +} + +// Use Google normalized distance as described in "The Google Similarity +// Distance", Cilibrasi and Vitanyi, 2007, as the significance score. +func (s *_significantTermsAggregation) Gnd(gnd types.GoogleNormalizedDistanceHeuristicVariant) *_significantTermsAggregation { + + s.v.Gnd = gnd.GoogleNormalizedDistanceHeuristicCaster() + + return s +} + +// Terms to include. +func (s *_significantTermsAggregation) Include(termsinclude types.TermsIncludeVariant) *_significantTermsAggregation { + + s.v.Include = *termsinclude.TermsIncludeCaster() + + return s +} + +// Use JLH score as the significance score. +func (s *_significantTermsAggregation) Jlh(jlh types.EmptyObjectVariant) *_significantTermsAggregation { + + s.v.Jlh = jlh.EmptyObjectCaster() + + return s +} + +// Only return terms that are found in more than `min_doc_count` hits. +func (s *_significantTermsAggregation) MinDocCount(mindoccount int64) *_significantTermsAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// Use mutual information as described in "Information Retrieval", Manning et +// al., Chapter 13.5.1, as the significance score. +func (s *_significantTermsAggregation) MutualInformation(mutualinformation types.MutualInformationHeuristicVariant) *_significantTermsAggregation { + + s.v.MutualInformation = mutualinformation.MutualInformationHeuristicCaster() + + return s +} + +// A simple calculation of the number of documents in the foreground sample with +// a term divided by the number of documents in the background with the term. +func (s *_significantTermsAggregation) Percentage(percentage types.PercentageScoreHeuristicVariant) *_significantTermsAggregation { + + s.v.Percentage = percentage.PercentageScoreHeuristicCaster() + + return s +} + +// Customized score, implemented via a script. 
+func (s *_significantTermsAggregation) ScriptHeuristic(scriptheuristic types.ScriptedHeuristicVariant) *_significantTermsAggregation { + + s.v.ScriptHeuristic = scriptheuristic.ScriptedHeuristicCaster() + + return s +} + +// Regulates the certainty a shard has if the term should actually be added to +// the candidate list or not with respect to the `min_doc_count`. +// Terms will only be considered if their local shard frequency within the set +// is higher than the `shard_min_doc_count`. +func (s *_significantTermsAggregation) ShardMinDocCount(shardmindoccount int64) *_significantTermsAggregation { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// Can be used to control the volumes of candidate terms produced by each shard. +// By default, `shard_size` will be automatically estimated based on the number +// of shards and the `size` parameter. +func (s *_significantTermsAggregation) ShardSize(shardsize int) *_significantTermsAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// The number of buckets returned out of the overall terms list. +func (s *_significantTermsAggregation) Size(size int) *_significantTermsAggregation { + + s.v.Size = &size + + return s +} + +func (s *_significantTermsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.SignificantTerms = s.v + + return container +} + +func (s *_significantTermsAggregation) SignificantTermsAggregationCaster() *types.SignificantTermsAggregation { + return s.v +} diff --git a/typedapi/esdsl/significanttextaggregation.go b/typedapi/esdsl/significanttextaggregation.go new file mode 100644 index 0000000000..a6c01b2bdd --- /dev/null +++ b/typedapi/esdsl/significanttextaggregation.go @@ -0,0 +1,195 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" +) + +type _significantTextAggregation struct { + v *types.SignificantTextAggregation +} + +// Returns interesting or unusual occurrences of free-text terms in a set. +func NewSignificantTextAggregation() *_significantTextAggregation { + + return &_significantTextAggregation{v: types.NewSignificantTextAggregation()} + +} + +// A background filter that can be used to focus in on significant terms within +// a narrower context, instead of the entire index. +func (s *_significantTextAggregation) BackgroundFilter(backgroundfilter types.QueryVariant) *_significantTextAggregation { + + s.v.BackgroundFilter = backgroundfilter.QueryCaster() + + return s +} + +// Use Chi square, as described in "Information Retrieval", Manning et al., +// Chapter 13.5.2, as the significance score. +func (s *_significantTextAggregation) ChiSquare(chisquare types.ChiSquareHeuristicVariant) *_significantTextAggregation { + + s.v.ChiSquare = chisquare.ChiSquareHeuristicCaster() + + return s +} + +// Values to exclude. 
+func (s *_significantTextAggregation) Exclude(termsexcludes ...string) *_significantTextAggregation { + + s.v.Exclude = termsexcludes + + return s +} + +// Determines whether the aggregation will use field values directly or global +// ordinals. +func (s *_significantTextAggregation) ExecutionHint(executionhint termsaggregationexecutionhint.TermsAggregationExecutionHint) *_significantTextAggregation { + + s.v.ExecutionHint = &executionhint + return s +} + +// The field from which to return significant text. +func (s *_significantTextAggregation) Field(field string) *_significantTextAggregation { + + s.v.Field = &field + + return s +} + +// Whether to filter out duplicate text to deal with noisy data. +func (s *_significantTextAggregation) FilterDuplicateText(filterduplicatetext bool) *_significantTextAggregation { + + s.v.FilterDuplicateText = &filterduplicatetext + + return s +} + +// Use Google normalized distance as described in "The Google Similarity +// Distance", Cilibrasi and Vitanyi, 2007, as the significance score. +func (s *_significantTextAggregation) Gnd(gnd types.GoogleNormalizedDistanceHeuristicVariant) *_significantTextAggregation { + + s.v.Gnd = gnd.GoogleNormalizedDistanceHeuristicCaster() + + return s +} + +// Values to include. +func (s *_significantTextAggregation) Include(termsinclude types.TermsIncludeVariant) *_significantTextAggregation { + + s.v.Include = *termsinclude.TermsIncludeCaster() + + return s +} + +// Use JLH score as the significance score. +func (s *_significantTextAggregation) Jlh(jlh types.EmptyObjectVariant) *_significantTextAggregation { + + s.v.Jlh = jlh.EmptyObjectCaster() + + return s +} + +// Only return values that are found in more than `min_doc_count` hits.
+func (s *_significantTextAggregation) MinDocCount(mindoccount int64) *_significantTextAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// Use mutual information as described in "Information Retrieval", Manning et +// al., Chapter 13.5.1, as the significance score. +func (s *_significantTextAggregation) MutualInformation(mutualinformation types.MutualInformationHeuristicVariant) *_significantTextAggregation { + + s.v.MutualInformation = mutualinformation.MutualInformationHeuristicCaster() + + return s +} + +// A simple calculation of the number of documents in the foreground sample with +// a term divided by the number of documents in the background with the term. +func (s *_significantTextAggregation) Percentage(percentage types.PercentageScoreHeuristicVariant) *_significantTextAggregation { + + s.v.Percentage = percentage.PercentageScoreHeuristicCaster() + + return s +} + +// Customized score, implemented via a script. +func (s *_significantTextAggregation) ScriptHeuristic(scriptheuristic types.ScriptedHeuristicVariant) *_significantTextAggregation { + + s.v.ScriptHeuristic = scriptheuristic.ScriptedHeuristicCaster() + + return s +} + +// Regulates the certainty a shard has if the values should actually be added to +// the candidate list or not with respect to the min_doc_count. +// Values will only be considered if their local shard frequency within the set +// is higher than the `shard_min_doc_count`. +func (s *_significantTextAggregation) ShardMinDocCount(shardmindoccount int64) *_significantTextAggregation { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// The number of candidate terms produced by each shard. +// By default, `shard_size` will be automatically estimated based on the number +// of shards and the `size` parameter. 
+func (s *_significantTextAggregation) ShardSize(shardsize int) *_significantTextAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// The number of buckets returned out of the overall terms list. +func (s *_significantTextAggregation) Size(size int) *_significantTextAggregation { + + s.v.Size = &size + + return s +} + +// Overrides the JSON `_source` fields from which text will be analyzed. +func (s *_significantTextAggregation) SourceFields(fields ...string) *_significantTextAggregation { + + s.v.SourceFields = fields + + return s +} + +func (s *_significantTextAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.SignificantText = s.v + + return container +} + +func (s *_significantTextAggregation) SignificantTextAggregationCaster() *types.SignificantTextAggregation { + return s.v +} diff --git a/typedapi/esdsl/simpleanalyzer.go b/typedapi/esdsl/simpleanalyzer.go new file mode 100644 index 0000000000..3822b7720d --- /dev/null +++ b/typedapi/esdsl/simpleanalyzer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _simpleAnalyzer struct { + v *types.SimpleAnalyzer +} + +func NewSimpleAnalyzer() *_simpleAnalyzer { + + return &_simpleAnalyzer{v: types.NewSimpleAnalyzer()} + +} + +func (s *_simpleAnalyzer) Version(versionstring string) *_simpleAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_simpleAnalyzer) SimpleAnalyzerCaster() *types.SimpleAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/simplemovingaverageaggregation.go b/typedapi/esdsl/simplemovingaverageaggregation.go new file mode 100644 index 0000000000..b95ba0cdcb --- /dev/null +++ b/typedapi/esdsl/simplemovingaverageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _simpleMovingAverageAggregation struct { + v *types.SimpleMovingAverageAggregation +} + +func NewSimpleMovingAverageAggregation(settings types.EmptyObjectVariant) *_simpleMovingAverageAggregation { + + tmp := &_simpleMovingAverageAggregation{v: types.NewSimpleMovingAverageAggregation()} + + tmp.Settings(settings) + + return tmp + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_simpleMovingAverageAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_simpleMovingAverageAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_simpleMovingAverageAggregation) Format(format string) *_simpleMovingAverageAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_simpleMovingAverageAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_simpleMovingAverageAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_simpleMovingAverageAggregation) Minimize(minimize bool) *_simpleMovingAverageAggregation { + + s.v.Minimize = &minimize + + return s +} + +func (s *_simpleMovingAverageAggregation) Predict(predict int) *_simpleMovingAverageAggregation { + + s.v.Predict = &predict + + return s +} + +func (s *_simpleMovingAverageAggregation) Settings(settings types.EmptyObjectVariant) *_simpleMovingAverageAggregation { + + s.v.Settings = *settings.EmptyObjectCaster() + + return s +} + +func (s *_simpleMovingAverageAggregation) Window(window int) *_simpleMovingAverageAggregation { + + s.v.Window = &window + + return s +} + +func (s *_simpleMovingAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.MovingAvg = s.v + + return container +} + +func (s *_simpleMovingAverageAggregation) SimpleMovingAverageAggregationCaster() *types.SimpleMovingAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/simplepatternsplittokenizer.go b/typedapi/esdsl/simplepatternsplittokenizer.go new file mode 100644 index 0000000000..1d7eeaaa20 --- /dev/null +++ b/typedapi/esdsl/simplepatternsplittokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _simplePatternSplitTokenizer struct { + v *types.SimplePatternSplitTokenizer +} + +func NewSimplePatternSplitTokenizer() *_simplePatternSplitTokenizer { + + return &_simplePatternSplitTokenizer{v: types.NewSimplePatternSplitTokenizer()} + +} + +func (s *_simplePatternSplitTokenizer) Pattern(pattern string) *_simplePatternSplitTokenizer { + + s.v.Pattern = &pattern + + return s +} + +func (s *_simplePatternSplitTokenizer) Version(versionstring string) *_simplePatternSplitTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_simplePatternSplitTokenizer) SimplePatternSplitTokenizerCaster() *types.SimplePatternSplitTokenizer { + return s.v +} diff --git a/typedapi/esdsl/simplepatterntokenizer.go b/typedapi/esdsl/simplepatterntokenizer.go new file mode 100644 index 0000000000..2d8d22acef --- /dev/null +++ b/typedapi/esdsl/simplepatterntokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _simplePatternTokenizer struct { + v *types.SimplePatternTokenizer +} + +func NewSimplePatternTokenizer() *_simplePatternTokenizer { + + return &_simplePatternTokenizer{v: types.NewSimplePatternTokenizer()} + +} + +func (s *_simplePatternTokenizer) Pattern(pattern string) *_simplePatternTokenizer { + + s.v.Pattern = &pattern + + return s +} + +func (s *_simplePatternTokenizer) Version(versionstring string) *_simplePatternTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_simplePatternTokenizer) SimplePatternTokenizerCaster() *types.SimplePatternTokenizer { + return s.v +} diff --git a/typedapi/esdsl/simplequerystringflags.go b/typedapi/esdsl/simplequerystringflags.go new file mode 100644 index 0000000000..57085eb2a8 --- /dev/null +++ b/typedapi/esdsl/simplequerystringflags.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _simpleQueryStringFlags struct { + v types.SimpleQueryStringFlags +} + +func NewSimpleQueryStringFlags(simplequerystringflags types.PipeSeparatedFlagsSimpleQueryStringFlagVariant) *_simpleQueryStringFlags { + + return &_simpleQueryStringFlags{v: simplequerystringflags} + +} + +func (u *_simpleQueryStringFlags) SimpleQueryStringFlagsCaster() *types.SimpleQueryStringFlags { + return &u.v +} diff --git a/typedapi/esdsl/simplequerystringquery.go b/typedapi/esdsl/simplequerystringquery.go new file mode 100644 index 0000000000..56695b1fdf --- /dev/null +++ b/typedapi/esdsl/simplequerystringquery.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" +) + +type _simpleQueryStringQuery struct { + v *types.SimpleQueryStringQuery +} + +// Returns roles based on a provided query string, using a parser with a limited +// but fault-tolerant syntax. +func NewSimpleQueryStringQuery(query string) *_simpleQueryStringQuery { + + tmp := &_simpleQueryStringQuery{v: types.NewSimpleQueryStringQuery()} + + tmp.Query(query) + + return tmp + +} + +// If `true`, the query attempts to analyze wildcard terms in the query string. +func (s *_simpleQueryStringQuery) AnalyzeWildcard(analyzewildcard bool) *_simpleQueryStringQuery { + + s.v.AnalyzeWildcard = &analyzewildcard + + return s +} + +// Analyzer used to convert text in the query string into tokens. +func (s *_simpleQueryStringQuery) Analyzer(analyzer string) *_simpleQueryStringQuery { + + s.v.Analyzer = &analyzer + + return s +} + +// If `true`, the parser creates a match_phrase query for each multi-position +// token. 
+func (s *_simpleQueryStringQuery) AutoGenerateSynonymsPhraseQuery(autogeneratesynonymsphrasequery bool) *_simpleQueryStringQuery { + + s.v.AutoGenerateSynonymsPhraseQuery = &autogeneratesynonymsphrasequery + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_simpleQueryStringQuery) Boost(boost float32) *_simpleQueryStringQuery { + + s.v.Boost = &boost + + return s +} + +// Default boolean logic used to interpret text in the query string if no +// operators are specified. +func (s *_simpleQueryStringQuery) DefaultOperator(defaultoperator operator.Operator) *_simpleQueryStringQuery { + + s.v.DefaultOperator = &defaultoperator + return s +} + +// Array of fields you wish to search. +// Accepts wildcard expressions. +// You also can boost relevance scores for matches to particular fields using a +// caret (`^`) notation. +// Defaults to the `index.query.default_field index` setting, which has a +// default value of `*`. +func (s *_simpleQueryStringQuery) Fields(fields ...string) *_simpleQueryStringQuery { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, v) + + } + return s +} + +// List of enabled operators for the simple query string syntax. +func (s *_simpleQueryStringQuery) Flags(simplequerystringflags types.PipeSeparatedFlagsSimpleQueryStringFlag) *_simpleQueryStringQuery { + + s.v.Flags = simplequerystringflags + + return s +} + +// Maximum number of terms to which the query expands for fuzzy matching. +func (s *_simpleQueryStringQuery) FuzzyMaxExpansions(fuzzymaxexpansions int) *_simpleQueryStringQuery { + + s.v.FuzzyMaxExpansions = &fuzzymaxexpansions + + return s +} + +// Number of beginning characters left unchanged for fuzzy matching. 
+func (s *_simpleQueryStringQuery) FuzzyPrefixLength(fuzzyprefixlength int) *_simpleQueryStringQuery { + + s.v.FuzzyPrefixLength = &fuzzyprefixlength + + return s +} + +// If `true`, edits for fuzzy matching include transpositions of two adjacent +// characters (for example, `ab` to `ba`). +func (s *_simpleQueryStringQuery) FuzzyTranspositions(fuzzytranspositions bool) *_simpleQueryStringQuery { + + s.v.FuzzyTranspositions = &fuzzytranspositions + + return s +} + +// If `true`, format-based errors, such as providing a text value for a numeric +// field, are ignored. +func (s *_simpleQueryStringQuery) Lenient(lenient bool) *_simpleQueryStringQuery { + + s.v.Lenient = &lenient + + return s +} + +// Minimum number of clauses that must match for a document to be returned. +func (s *_simpleQueryStringQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_simpleQueryStringQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Query string in the simple query string syntax you wish to parse and use for +// search. +func (s *_simpleQueryStringQuery) Query(query string) *_simpleQueryStringQuery { + + s.v.Query = query + + return s +} + +func (s *_simpleQueryStringQuery) QueryName_(queryname_ string) *_simpleQueryStringQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Suffix appended to quoted text in the query string. 
+func (s *_simpleQueryStringQuery) QuoteFieldSuffix(quotefieldsuffix string) *_simpleQueryStringQuery { + + s.v.QuoteFieldSuffix = &quotefieldsuffix + + return s +} + +func (s *_simpleQueryStringQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SimpleQueryString = s.v + + return container +} + +func (s *_simpleQueryStringQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.SimpleQueryString = s.v + + return container +} + +func (s *_simpleQueryStringQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.SimpleQueryString = s.v + + return container +} + +func (s *_simpleQueryStringQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.SimpleQueryString = s.v + + return container +} + +func (s *_simpleQueryStringQuery) SimpleQueryStringQueryCaster() *types.SimpleQueryStringQuery { + return s.v +} diff --git a/typedapi/esdsl/simulatedactions.go b/typedapi/esdsl/simulatedactions.go new file mode 100644 index 0000000000..92e1552b6e --- /dev/null +++ b/typedapi/esdsl/simulatedactions.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _simulatedActions struct { + v *types.SimulatedActions +} + +func NewSimulatedActions(all types.SimulatedActionsVariant, useall bool) *_simulatedActions { + + tmp := &_simulatedActions{v: types.NewSimulatedActions()} + + tmp.All(all) + + tmp.UseAll(useall) + + return tmp + +} + +func (s *_simulatedActions) Actions(actions ...string) *_simulatedActions { + + for _, v := range actions { + + s.v.Actions = append(s.v.Actions, v) + + } + return s +} + +func (s *_simulatedActions) All(all types.SimulatedActionsVariant) *_simulatedActions { + + s.v.All = all.SimulatedActionsCaster() + + return s +} + +func (s *_simulatedActions) UseAll(useall bool) *_simulatedActions { + + s.v.UseAll = useall + + return s +} + +func (s *_simulatedActions) SimulatedActionsCaster() *types.SimulatedActions { + return s.v +} diff --git a/typedapi/esdsl/sizefield.go b/typedapi/esdsl/sizefield.go new file mode 100644 index 0000000000..6428e0567e --- /dev/null +++ b/typedapi/esdsl/sizefield.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sizeField struct { + v *types.SizeField +} + +func NewSizeField(enabled bool) *_sizeField { + + tmp := &_sizeField{v: types.NewSizeField()} + + tmp.Enabled(enabled) + + return tmp + +} + +func (s *_sizeField) Enabled(enabled bool) *_sizeField { + + s.v.Enabled = enabled + + return s +} + +func (s *_sizeField) SizeFieldCaster() *types.SizeField { + return s.v +} diff --git a/typedapi/esdsl/slackaction.go b/typedapi/esdsl/slackaction.go new file mode 100644 index 0000000000..e6450f953a --- /dev/null +++ b/typedapi/esdsl/slackaction.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slackAction struct { + v *types.SlackAction +} + +func NewSlackAction(message types.SlackMessageVariant) *_slackAction { + + tmp := &_slackAction{v: types.NewSlackAction()} + + tmp.Message(message) + + return tmp + +} + +func (s *_slackAction) Account(account string) *_slackAction { + + s.v.Account = &account + + return s +} + +func (s *_slackAction) Message(message types.SlackMessageVariant) *_slackAction { + + s.v.Message = *message.SlackMessageCaster() + + return s +} + +func (s *_slackAction) SlackActionCaster() *types.SlackAction { + return s.v +} diff --git a/typedapi/esdsl/slackattachment.go b/typedapi/esdsl/slackattachment.go new file mode 100644 index 0000000000..18ea76c415 --- /dev/null +++ b/typedapi/esdsl/slackattachment.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slackAttachment struct { + v *types.SlackAttachment +} + +func NewSlackAttachment(authorname string, title string) *_slackAttachment { + + tmp := &_slackAttachment{v: types.NewSlackAttachment()} + + tmp.AuthorName(authorname) + + tmp.Title(title) + + return tmp + +} + +func (s *_slackAttachment) AuthorIcon(authoricon string) *_slackAttachment { + + s.v.AuthorIcon = &authoricon + + return s +} + +func (s *_slackAttachment) AuthorLink(authorlink string) *_slackAttachment { + + s.v.AuthorLink = &authorlink + + return s +} + +func (s *_slackAttachment) AuthorName(authorname string) *_slackAttachment { + + s.v.AuthorName = authorname + + return s +} + +func (s *_slackAttachment) Color(color string) *_slackAttachment { + + s.v.Color = &color + + return s +} + +func (s *_slackAttachment) Fallback(fallback string) *_slackAttachment { + + s.v.Fallback = &fallback + + return s +} + +func (s *_slackAttachment) Fields(fields ...types.SlackAttachmentFieldVariant) *_slackAttachment { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, *v.SlackAttachmentFieldCaster()) + + } + return s +} + +func (s *_slackAttachment) Footer(footer string) *_slackAttachment { + + s.v.Footer = &footer + + return s +} + +func (s *_slackAttachment) FooterIcon(footericon string) *_slackAttachment { + + s.v.FooterIcon = &footericon + + return s +} + +func (s *_slackAttachment) ImageUrl(imageurl string) *_slackAttachment { + + s.v.ImageUrl = &imageurl + + return s +} + +func (s *_slackAttachment) Pretext(pretext string) *_slackAttachment { + + s.v.Pretext = &pretext + + return s +} + +func (s *_slackAttachment) Text(text string) *_slackAttachment { + + s.v.Text = &text + + return s +} + +func (s *_slackAttachment) 
ThumbUrl(thumburl string) *_slackAttachment { + + s.v.ThumbUrl = &thumburl + + return s +} + +func (s *_slackAttachment) Title(title string) *_slackAttachment { + + s.v.Title = title + + return s +} + +func (s *_slackAttachment) TitleLink(titlelink string) *_slackAttachment { + + s.v.TitleLink = &titlelink + + return s +} + +func (s *_slackAttachment) Ts(epochtimeunitseconds int64) *_slackAttachment { + + s.v.Ts = &epochtimeunitseconds + + return s +} + +func (s *_slackAttachment) SlackAttachmentCaster() *types.SlackAttachment { + return s.v +} diff --git a/typedapi/esdsl/slackattachmentfield.go b/typedapi/esdsl/slackattachmentfield.go new file mode 100644 index 0000000000..df50a9c6da --- /dev/null +++ b/typedapi/esdsl/slackattachmentfield.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slackAttachmentField struct { + v *types.SlackAttachmentField +} + +func NewSlackAttachmentField(int bool, title string, value string) *_slackAttachmentField { + + tmp := &_slackAttachmentField{v: types.NewSlackAttachmentField()} + + tmp.Int(int) + + tmp.Title(title) + + tmp.Value(value) + + return tmp + +} + +func (s *_slackAttachmentField) Int(int bool) *_slackAttachmentField { + + s.v.Int = int + + return s +} + +func (s *_slackAttachmentField) Title(title string) *_slackAttachmentField { + + s.v.Title = title + + return s +} + +func (s *_slackAttachmentField) Value(value string) *_slackAttachmentField { + + s.v.Value = value + + return s +} + +func (s *_slackAttachmentField) SlackAttachmentFieldCaster() *types.SlackAttachmentField { + return s.v +} diff --git a/typedapi/esdsl/slackdynamicattachment.go b/typedapi/esdsl/slackdynamicattachment.go new file mode 100644 index 0000000000..d9ad458193 --- /dev/null +++ b/typedapi/esdsl/slackdynamicattachment.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slackDynamicAttachment struct { + v *types.SlackDynamicAttachment +} + +func NewSlackDynamicAttachment(attachmenttemplate types.SlackAttachmentVariant, listpath string) *_slackDynamicAttachment { + + tmp := &_slackDynamicAttachment{v: types.NewSlackDynamicAttachment()} + + tmp.AttachmentTemplate(attachmenttemplate) + + tmp.ListPath(listpath) + + return tmp + +} + +func (s *_slackDynamicAttachment) AttachmentTemplate(attachmenttemplate types.SlackAttachmentVariant) *_slackDynamicAttachment { + + s.v.AttachmentTemplate = *attachmenttemplate.SlackAttachmentCaster() + + return s +} + +func (s *_slackDynamicAttachment) ListPath(listpath string) *_slackDynamicAttachment { + + s.v.ListPath = listpath + + return s +} + +func (s *_slackDynamicAttachment) SlackDynamicAttachmentCaster() *types.SlackDynamicAttachment { + return s.v +} diff --git a/typedapi/esdsl/slackmessage.go b/typedapi/esdsl/slackmessage.go new file mode 100644 index 0000000000..4d50196c8b --- /dev/null +++ b/typedapi/esdsl/slackmessage.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slackMessage struct { + v *types.SlackMessage +} + +func NewSlackMessage(from string, text string) *_slackMessage { + + tmp := &_slackMessage{v: types.NewSlackMessage()} + + tmp.From(from) + + tmp.Text(text) + + return tmp + +} + +func (s *_slackMessage) Attachments(attachments ...types.SlackAttachmentVariant) *_slackMessage { + + for _, v := range attachments { + + s.v.Attachments = append(s.v.Attachments, *v.SlackAttachmentCaster()) + + } + return s +} + +func (s *_slackMessage) DynamicAttachments(dynamicattachments types.SlackDynamicAttachmentVariant) *_slackMessage { + + s.v.DynamicAttachments = dynamicattachments.SlackDynamicAttachmentCaster() + + return s +} + +func (s *_slackMessage) From(from string) *_slackMessage { + + s.v.From = from + + return s +} + +func (s *_slackMessage) Icon(icon string) *_slackMessage { + + s.v.Icon = &icon + + return s +} + +func (s *_slackMessage) Text(text string) *_slackMessage { + + s.v.Text = text + + return s +} + +func (s *_slackMessage) To(tos ...string) *_slackMessage { + + for _, v := range tos { + + s.v.To = append(s.v.To, v) + + } + return s +} + +func (s *_slackMessage) SlackMessageCaster() *types.SlackMessage { + return s.v +} diff --git a/typedapi/esdsl/slicedscroll.go b/typedapi/esdsl/slicedscroll.go new file mode 100644 index 0000000000..b3c614e2c6 --- /dev/null +++ b/typedapi/esdsl/slicedscroll.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slicedScroll struct { + v *types.SlicedScroll +} + +func NewSlicedScroll(max int) *_slicedScroll { + + tmp := &_slicedScroll{v: types.NewSlicedScroll()} + + tmp.Max(max) + + return tmp + +} + +func (s *_slicedScroll) Field(field string) *_slicedScroll { + + s.v.Field = &field + + return s +} + +func (s *_slicedScroll) Id(id string) *_slicedScroll { + + s.v.Id = id + + return s +} + +func (s *_slicedScroll) Max(max int) *_slicedScroll { + + s.v.Max = max + + return s +} + +func (s *_slicedScroll) SlicedScrollCaster() *types.SlicedScroll { + return s.v +} diff --git a/typedapi/esdsl/slowlogsettings.go b/typedapi/esdsl/slowlogsettings.go new file mode 100644 index 0000000000..c57239dfb1 --- /dev/null +++ b/typedapi/esdsl/slowlogsettings.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slowlogSettings struct { + v *types.SlowlogSettings +} + +func NewSlowlogSettings() *_slowlogSettings { + + return &_slowlogSettings{v: types.NewSlowlogSettings()} + +} + +func (s *_slowlogSettings) Level(level string) *_slowlogSettings { + + s.v.Level = &level + + return s +} + +func (s *_slowlogSettings) Reformat(reformat bool) *_slowlogSettings { + + s.v.Reformat = &reformat + + return s +} + +func (s *_slowlogSettings) Source(source int) *_slowlogSettings { + + s.v.Source = &source + + return s +} + +func (s *_slowlogSettings) Threshold(threshold types.SlowlogTresholdsVariant) *_slowlogSettings { + + s.v.Threshold = threshold.SlowlogTresholdsCaster() + + return s +} + +func (s *_slowlogSettings) SlowlogSettingsCaster() *types.SlowlogSettings { + return s.v +} diff --git a/typedapi/esdsl/slowlogtresholdlevels.go b/typedapi/esdsl/slowlogtresholdlevels.go new file mode 100644 index 0000000000..ec1f39d7da --- /dev/null +++ b/typedapi/esdsl/slowlogtresholdlevels.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slowlogTresholdLevels struct { + v *types.SlowlogTresholdLevels +} + +func NewSlowlogTresholdLevels() *_slowlogTresholdLevels { + + return &_slowlogTresholdLevels{v: types.NewSlowlogTresholdLevels()} + +} + +func (s *_slowlogTresholdLevels) Debug(duration types.DurationVariant) *_slowlogTresholdLevels { + + s.v.Debug = *duration.DurationCaster() + + return s +} + +func (s *_slowlogTresholdLevels) Info(duration types.DurationVariant) *_slowlogTresholdLevels { + + s.v.Info = *duration.DurationCaster() + + return s +} + +func (s *_slowlogTresholdLevels) Trace(duration types.DurationVariant) *_slowlogTresholdLevels { + + s.v.Trace = *duration.DurationCaster() + + return s +} + +func (s *_slowlogTresholdLevels) Warn(duration types.DurationVariant) *_slowlogTresholdLevels { + + s.v.Warn = *duration.DurationCaster() + + return s +} + +func (s *_slowlogTresholdLevels) SlowlogTresholdLevelsCaster() *types.SlowlogTresholdLevels { + return s.v +} diff --git a/typedapi/esdsl/slowlogtresholds.go b/typedapi/esdsl/slowlogtresholds.go new file mode 100644 index 0000000000..4d19d72494 --- /dev/null +++ b/typedapi/esdsl/slowlogtresholds.go @@ -0,0 
+1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _slowlogTresholds struct { + v *types.SlowlogTresholds +} + +func NewSlowlogTresholds() *_slowlogTresholds { + + return &_slowlogTresholds{v: types.NewSlowlogTresholds()} + +} + +func (s *_slowlogTresholds) Fetch(fetch types.SlowlogTresholdLevelsVariant) *_slowlogTresholds { + + s.v.Fetch = fetch.SlowlogTresholdLevelsCaster() + + return s +} + +func (s *_slowlogTresholds) Query(query types.SlowlogTresholdLevelsVariant) *_slowlogTresholds { + + s.v.Query = query.SlowlogTresholdLevelsCaster() + + return s +} + +func (s *_slowlogTresholds) SlowlogTresholdsCaster() *types.SlowlogTresholds { + return s.v +} diff --git a/typedapi/esdsl/smoothingmodelcontainer.go b/typedapi/esdsl/smoothingmodelcontainer.go new file mode 100644 index 0000000000..86667e791c --- /dev/null +++ b/typedapi/esdsl/smoothingmodelcontainer.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _smoothingModelContainer struct { + v *types.SmoothingModelContainer +} + +func NewSmoothingModelContainer() *_smoothingModelContainer { + return &_smoothingModelContainer{v: types.NewSmoothingModelContainer()} +} + +// AdditionalSmoothingModelContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_smoothingModelContainer) AdditionalSmoothingModelContainerProperty(key string, value json.RawMessage) *_smoothingModelContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalSmoothingModelContainerProperty = tmp + return s +} + +// A smoothing model that uses an additive smoothing where a constant (typically +// `1.0` or smaller) is added to all counts to balance weights. 
+func (s *_smoothingModelContainer) Laplace(laplace types.LaplaceSmoothingModelVariant) *_smoothingModelContainer { + + s.v.Laplace = laplace.LaplaceSmoothingModelCaster() + + return s +} + +// A smoothing model that takes the weighted mean of the unigrams, bigrams, and +// trigrams based on user supplied weights (lambdas). +func (s *_smoothingModelContainer) LinearInterpolation(linearinterpolation types.LinearInterpolationSmoothingModelVariant) *_smoothingModelContainer { + + s.v.LinearInterpolation = linearinterpolation.LinearInterpolationSmoothingModelCaster() + + return s +} + +// A simple backoff model that backs off to lower order n-gram models if the +// higher order count is `0` and discounts the lower order n-gram model by a +// constant factor. +func (s *_smoothingModelContainer) StupidBackoff(stupidbackoff types.StupidBackoffSmoothingModelVariant) *_smoothingModelContainer { + + s.v.StupidBackoff = stupidbackoff.StupidBackoffSmoothingModelCaster() + + return s +} + +func (s *_smoothingModelContainer) SmoothingModelContainerCaster() *types.SmoothingModelContainer { + return s.v +} diff --git a/typedapi/esdsl/snowballanalyzer.go b/typedapi/esdsl/snowballanalyzer.go new file mode 100644 index 0000000000..587e0e197d --- /dev/null +++ b/typedapi/esdsl/snowballanalyzer.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage" +) + +type _snowballAnalyzer struct { + v *types.SnowballAnalyzer +} + +func NewSnowballAnalyzer(language snowballlanguage.SnowballLanguage) *_snowballAnalyzer { + + tmp := &_snowballAnalyzer{v: types.NewSnowballAnalyzer()} + + tmp.Language(language) + + return tmp + +} + +func (s *_snowballAnalyzer) Language(language snowballlanguage.SnowballLanguage) *_snowballAnalyzer { + + s.v.Language = language + return s +} + +func (s *_snowballAnalyzer) Stopwords(stopwords ...string) *_snowballAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_snowballAnalyzer) Version(versionstring string) *_snowballAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_snowballAnalyzer) SnowballAnalyzerCaster() *types.SnowballAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/snowballtokenfilter.go b/typedapi/esdsl/snowballtokenfilter.go new file mode 100644 index 0000000000..34d4625baf --- /dev/null +++ b/typedapi/esdsl/snowballtokenfilter.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage" +) + +type _snowballTokenFilter struct { + v *types.SnowballTokenFilter +} + +func NewSnowballTokenFilter() *_snowballTokenFilter { + + return &_snowballTokenFilter{v: types.NewSnowballTokenFilter()} + +} + +func (s *_snowballTokenFilter) Language(language snowballlanguage.SnowballLanguage) *_snowballTokenFilter { + + s.v.Language = &language + return s +} + +func (s *_snowballTokenFilter) Version(versionstring string) *_snowballTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_snowballTokenFilter) SnowballTokenFilterCaster() *types.SnowballTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/softdeletes.go b/typedapi/esdsl/softdeletes.go new file mode 100644 index 0000000000..4493afdc1d --- /dev/null +++ b/typedapi/esdsl/softdeletes.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _softDeletes struct { + v *types.SoftDeletes +} + +func NewSoftDeletes() *_softDeletes { + + return &_softDeletes{v: types.NewSoftDeletes()} + +} + +// Indicates whether soft deletes are enabled on the index. +func (s *_softDeletes) Enabled(enabled bool) *_softDeletes { + + s.v.Enabled = &enabled + + return s +} + +// The maximum period to retain a shard history retention lease before it is +// considered expired. +// Shard history retention leases ensure that soft deletes are retained during +// merges on the Lucene +// index. If a soft delete is merged away before it can be replicated to a +// follower the following +// process will fail due to incomplete history on the leader. 
+func (s *_softDeletes) RetentionLease(retentionlease types.RetentionLeaseVariant) *_softDeletes { + + s.v.RetentionLease = retentionlease.RetentionLeaseCaster() + + return s +} + +func (s *_softDeletes) SoftDeletesCaster() *types.SoftDeletes { + return s.v +} diff --git a/typedapi/esdsl/soranianalyzer.go b/typedapi/esdsl/soranianalyzer.go new file mode 100644 index 0000000000..01b359c4c5 --- /dev/null +++ b/typedapi/esdsl/soranianalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _soraniAnalyzer struct { + v *types.SoraniAnalyzer +} + +func NewSoraniAnalyzer() *_soraniAnalyzer { + + return &_soraniAnalyzer{v: types.NewSoraniAnalyzer()} + +} + +func (s *_soraniAnalyzer) StemExclusion(stemexclusions ...string) *_soraniAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_soraniAnalyzer) Stopwords(stopwords ...string) *_soraniAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_soraniAnalyzer) StopwordsPath(stopwordspath string) *_soraniAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_soraniAnalyzer) SoraniAnalyzerCaster() *types.SoraniAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/sort.go b/typedapi/esdsl/sort.go new file mode 100644 index 0000000000..288f08a782 --- /dev/null +++ b/typedapi/esdsl/sort.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _sort struct { + v types.Sort +} + +func NewSort() *_sort { + return &_sort{v: []types.SortCombinations{}} +} + +func (u *_sort) SortCaster() *types.Sort { + return &u.v +} diff --git a/typedapi/esdsl/sortcombinations.go b/typedapi/esdsl/sortcombinations.go new file mode 100644 index 0000000000..b277c04706 --- /dev/null +++ b/typedapi/esdsl/sortcombinations.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _sortCombinations struct { + v types.SortCombinations +} + +func NewSortCombinations() *_sortCombinations { + return &_sortCombinations{v: nil} +} + +func (u *_sortCombinations) Field(field string) *_sortCombinations { + + u.v = &field + + return u +} + +func (u *_sortCombinations) SortOptions(sortoptions types.SortOptionsVariant) *_sortCombinations { + + u.v = &sortoptions + + return u +} + +// Interface implementation for SortOptions in SortCombinations union +func (u *_sortOptions) SortCombinationsCaster() *types.SortCombinations { + t := types.SortCombinations(u.v) + return &t +} + +func (u *_sortCombinations) SortCombinationsCaster() *types.SortCombinations { + return &u.v +} diff --git a/typedapi/esdsl/sortoptions.go b/typedapi/esdsl/sortoptions.go new file mode 100644 index 0000000000..94dea94f8f --- /dev/null +++ b/typedapi/esdsl/sortoptions.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sortOptions struct { + v *types.SortOptions +} + +func NewSortOptions() *_sortOptions { + return &_sortOptions{v: types.NewSortOptions()} +} + +func (s *_sortOptions) Doc_(doc_ types.ScoreSortVariant) *_sortOptions { + + s.v.Doc_ = doc_.ScoreSortCaster() + + return s +} + +func (s *_sortOptions) GeoDistance_(geodistance_ types.GeoDistanceSortVariant) *_sortOptions { + + s.v.GeoDistance_ = geodistance_.GeoDistanceSortCaster() + + return s +} + +func (s *_sortOptions) Score_(score_ types.ScoreSortVariant) *_sortOptions { + + s.v.Score_ = score_.ScoreSortCaster() + + return s +} + +func (s *_sortOptions) Script_(script_ types.ScriptSortVariant) *_sortOptions { + + s.v.Script_ = script_.ScriptSortCaster() + + return s +} + +func (s *_sortOptions) SortOptions(sortoptions map[string]types.FieldSort) *_sortOptions { + + s.v.SortOptions = sortoptions + return s +} + +func (s *_sortOptions) AddSortOption(key string, value types.FieldSortVariant) *_sortOptions { + + var tmp map[string]types.FieldSort + if s.v.SortOptions == nil { + s.v.SortOptions = make(map[string]types.FieldSort) + } else { + tmp = s.v.SortOptions + } + + tmp[key] = *value.FieldSortCaster() + + s.v.SortOptions = tmp + return s +} + +func (s *_sortOptions) SortOptionsCaster() *types.SortOptions { + return s.v +} diff --git a/typedapi/esdsl/sortprocessor.go b/typedapi/esdsl/sortprocessor.go new file mode 100644 index 0000000000..e69c96eff7 --- /dev/null +++ b/typedapi/esdsl/sortprocessor.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" +) + +type _sortProcessor struct { + v *types.SortProcessor +} + +// Sorts the elements of an array ascending or descending. +// Homogeneous arrays of numbers will be sorted numerically, while arrays of +// strings or heterogeneous arrays of strings + numbers will be sorted +// lexicographically. +// Throws an error when the field is not an array. +func NewSortProcessor() *_sortProcessor { + + return &_sortProcessor{v: types.NewSortProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_sortProcessor) Description(description string) *_sortProcessor { + + s.v.Description = &description + + return s +} + +// The field to be sorted. +func (s *_sortProcessor) Field(field string) *_sortProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_sortProcessor) If(if_ types.ScriptVariant) *_sortProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_sortProcessor) IgnoreFailure(ignorefailure bool) *_sortProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_sortProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_sortProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// The sort order to use. +// Accepts `"asc"` or `"desc"`. +func (s *_sortProcessor) Order(order sortorder.SortOrder) *_sortProcessor { + + s.v.Order = &order + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_sortProcessor) Tag(tag string) *_sortProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the sorted value to. +// By default, the field is updated in-place. +func (s *_sortProcessor) TargetField(field string) *_sortProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_sortProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Sort = s.v + + return container +} + +func (s *_sortProcessor) SortProcessorCaster() *types.SortProcessor { + return s.v +} diff --git a/typedapi/esdsl/sourceconfig.go b/typedapi/esdsl/sourceconfig.go new file mode 100644 index 0000000000..08e091a214 --- /dev/null +++ b/typedapi/esdsl/sourceconfig.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _sourceConfig struct { + v types.SourceConfig +} + +func NewSourceConfig() *_sourceConfig { + return &_sourceConfig{v: nil} +} + +func (u *_sourceConfig) Bool(bool bool) *_sourceConfig { + + u.v = &bool + + return u +} + +func (u *_sourceConfig) SourceFilter(sourcefilter types.SourceFilterVariant) *_sourceConfig { + + u.v = &sourcefilter + + return u +} + +// Interface implementation for SourceFilter in SourceConfig union +func (u *_sourceFilter) SourceConfigCaster() *types.SourceConfig { + t := types.SourceConfig(u.v) + return &t +} + +func (u *_sourceConfig) SourceConfigCaster() *types.SourceConfig { + return &u.v +} diff --git a/typedapi/esdsl/sourcefield.go b/typedapi/esdsl/sourcefield.go new file mode 100644 index 0000000000..d7b4ba51dc --- /dev/null +++ b/typedapi/esdsl/sourcefield.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode" +) + +type _sourceField struct { + v *types.SourceField +} + +func NewSourceField() *_sourceField { + + return &_sourceField{v: types.NewSourceField()} + +} + +func (s *_sourceField) Compress(compress bool) *_sourceField { + + s.v.Compress = &compress + + return s +} + +func (s *_sourceField) CompressThreshold(compressthreshold string) *_sourceField { + + s.v.CompressThreshold = &compressthreshold + + return s +} + +func (s *_sourceField) Enabled(enabled bool) *_sourceField { + + s.v.Enabled = &enabled + + return s +} + +func (s *_sourceField) Excludes(excludes ...string) *_sourceField { + + for _, v := range excludes { + + s.v.Excludes = append(s.v.Excludes, v) + + } + return s +} + +func (s *_sourceField) Includes(includes ...string) *_sourceField { + + for _, v := range includes { + + s.v.Includes = append(s.v.Includes, v) + + } + return s +} + +func (s *_sourceField) Mode(mode sourcefieldmode.SourceFieldMode) *_sourceField { + + s.v.Mode = &mode + return s +} + +func (s *_sourceField) SourceFieldCaster() *types.SourceField { + return s.v +} diff --git a/typedapi/esdsl/sourcefilter.go b/typedapi/esdsl/sourcefilter.go new file mode 100644 index 0000000000..b6cc349c6b --- /dev/null +++ 
b/typedapi/esdsl/sourcefilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sourceFilter struct { + v *types.SourceFilter +} + +func NewSourceFilter() *_sourceFilter { + + return &_sourceFilter{v: types.NewSourceFilter()} + +} + +func (s *_sourceFilter) Excludes(fields ...string) *_sourceFilter { + + s.v.Excludes = fields + + return s +} + +func (s *_sourceFilter) Includes(fields ...string) *_sourceFilter { + + s.v.Includes = fields + + return s +} + +func (s *_sourceFilter) SourceFilterCaster() *types.SourceFilter { + return s.v +} diff --git a/typedapi/esdsl/sourceindex.go b/typedapi/esdsl/sourceindex.go new file mode 100644 index 0000000000..1167f08d72 --- /dev/null +++ b/typedapi/esdsl/sourceindex.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sourceIndex struct { + v *types.SourceIndex +} + +func NewSourceIndex() *_sourceIndex { + + return &_sourceIndex{v: types.NewSourceIndex()} + +} + +func (s *_sourceIndex) Index(indexname string) *_sourceIndex { + + s.v.Index = indexname + + return s +} + +func (s *_sourceIndex) SourceIndexCaster() *types.SourceIndex { + return s.v +} diff --git a/typedapi/esdsl/sourceonlyrepository.go b/typedapi/esdsl/sourceonlyrepository.go new file mode 100644 index 0000000000..43335f0b30 --- /dev/null +++ b/typedapi/esdsl/sourceonlyrepository.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sourceOnlyRepository struct { + v *types.SourceOnlyRepository +} + +func NewSourceOnlyRepository(settings types.SourceOnlyRepositorySettingsVariant) *_sourceOnlyRepository { + + tmp := &_sourceOnlyRepository{v: types.NewSourceOnlyRepository()} + + tmp.Settings(settings) + + return tmp + +} + +// The repository settings. +func (s *_sourceOnlyRepository) Settings(settings types.SourceOnlyRepositorySettingsVariant) *_sourceOnlyRepository { + + s.v.Settings = *settings.SourceOnlyRepositorySettingsCaster() + + return s +} + +func (s *_sourceOnlyRepository) Uuid(uuid string) *_sourceOnlyRepository { + + s.v.Uuid = &uuid + + return s +} + +func (s *_sourceOnlyRepository) SourceOnlyRepositoryCaster() *types.SourceOnlyRepository { + return s.v +} diff --git a/typedapi/esdsl/sourceonlyrepositorysettings.go b/typedapi/esdsl/sourceonlyrepositorysettings.go new file mode 100644 index 0000000000..fb31320c1c --- /dev/null +++ b/typedapi/esdsl/sourceonlyrepositorysettings.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sourceOnlyRepositorySettings struct { + v *types.SourceOnlyRepositorySettings +} + +func NewSourceOnlyRepositorySettings() *_sourceOnlyRepositorySettings { + + return &_sourceOnlyRepositorySettings{v: types.NewSourceOnlyRepositorySettings()} + +} + +// Big files can be broken down into multiple smaller blobs in the blob store +// during snapshotting. +// It is not recommended to change this value from its default unless there is +// an explicit reason for limiting the size of blobs in the repository. +// Setting a value lower than the default can result in an increased number of +// API calls to the blob store during snapshot create and restore operations +// compared to using the default value and thus make both operations slower and +// more costly. +// Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. +// The default varies by repository type. +func (s *_sourceOnlyRepositorySettings) ChunkSize(bytesize types.ByteSizeVariant) *_sourceOnlyRepositorySettings { + + s.v.ChunkSize = *bytesize.ByteSizeCaster() + + return s +} + +// When set to `true`, metadata files are stored in compressed format. 
+// This setting doesn't affect index files that are already compressed by +// default. +func (s *_sourceOnlyRepositorySettings) Compress(compress bool) *_sourceOnlyRepositorySettings { + + s.v.Compress = &compress + + return s +} + +// The delegated repository type. For valid values, refer to the `type` +// parameter. +// Source repositories can use `settings` properties for its delegated +// repository type. +func (s *_sourceOnlyRepositorySettings) DelegateType(delegatetype string) *_sourceOnlyRepositorySettings { + + s.v.DelegateType = &delegatetype + + return s +} + +// The maximum number of snapshots the repository can contain. +// The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. +func (s *_sourceOnlyRepositorySettings) MaxNumberOfSnapshots(maxnumberofsnapshots int) *_sourceOnlyRepositorySettings { + + s.v.MaxNumberOfSnapshots = &maxnumberofsnapshots + + return s +} + +// The maximum snapshot restore rate per node. +// It defaults to unlimited. +// Note that restores are also throttled through recovery settings. +func (s *_sourceOnlyRepositorySettings) MaxRestoreBytesPerSec(bytesize types.ByteSizeVariant) *_sourceOnlyRepositorySettings { + + s.v.MaxRestoreBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// The maximum snapshot creation rate per node. +// It defaults to 40mb per second. +// Note that if the recovery settings for managed services are set, then it +// defaults to unlimited, and the rate is additionally throttled through +// recovery settings. +func (s *_sourceOnlyRepositorySettings) MaxSnapshotBytesPerSec(bytesize types.ByteSizeVariant) *_sourceOnlyRepositorySettings { + + s.v.MaxSnapshotBytesPerSec = *bytesize.ByteSizeCaster() + + return s +} + +// If `true`, the repository is read-only. +// The cluster can retrieve and restore snapshots from the repository but not +// write to the repository or create snapshots in it. +// +// Only a cluster with write access can create snapshots in the repository. 
+// All other clusters connected to the repository should have the `readonly` +// parameter set to `true`. +// +// If `false`, the cluster can write to the repository and create snapshots in +// it. +// +// IMPORTANT: If you register the same snapshot repository with multiple +// clusters, only one cluster should have write access to the repository. +// Having multiple clusters write to the repository at the same time risks +// corrupting the contents of the repository. +func (s *_sourceOnlyRepositorySettings) ReadOnly(readonly bool) *_sourceOnlyRepositorySettings { + + s.v.ReadOnly = &readonly + + return s +} + +func (s *_sourceOnlyRepositorySettings) SourceOnlyRepositorySettingsCaster() *types.SourceOnlyRepositorySettings { + return s.v +} diff --git a/typedapi/esdsl/spancontainingquery.go b/typedapi/esdsl/spancontainingquery.go new file mode 100644 index 0000000000..e3517193ad --- /dev/null +++ b/typedapi/esdsl/spancontainingquery.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanContainingQuery struct { + v *types.SpanContainingQuery +} + +// Returns matches which enclose another span query. +func NewSpanContainingQuery(big types.SpanQueryVariant, little types.SpanQueryVariant) *_spanContainingQuery { + + tmp := &_spanContainingQuery{v: types.NewSpanContainingQuery()} + + tmp.Big(big) + + tmp.Little(little) + + return tmp + +} + +// Can be any span query. +// Matching spans from `big` that contain matches from `little` are returned. +func (s *_spanContainingQuery) Big(big types.SpanQueryVariant) *_spanContainingQuery { + + s.v.Big = *big.SpanQueryCaster() + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanContainingQuery) Boost(boost float32) *_spanContainingQuery { + + s.v.Boost = &boost + + return s +} + +// Can be any span query. +// Matching spans from `big` that contain matches from `little` are returned. 
+func (s *_spanContainingQuery) Little(little types.SpanQueryVariant) *_spanContainingQuery { + + s.v.Little = *little.SpanQueryCaster() + + return s +} + +func (s *_spanContainingQuery) QueryName_(queryname_ string) *_spanContainingQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanContainingQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanContaining = s.v + + return container +} + +func (s *_spanContainingQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanContaining = s.v + + return container +} + +func (s *_spanContainingQuery) SpanContainingQueryCaster() *types.SpanContainingQuery { + return s.v +} diff --git a/typedapi/esdsl/spanfieldmaskingquery.go b/typedapi/esdsl/spanfieldmaskingquery.go new file mode 100644 index 0000000000..55b684b9f9 --- /dev/null +++ b/typedapi/esdsl/spanfieldmaskingquery.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanFieldMaskingQuery struct { + v *types.SpanFieldMaskingQuery +} + +// Wrapper to allow span queries to participate in composite single-field span +// queries by _lying_ about their search field. +func NewSpanFieldMaskingQuery(query types.SpanQueryVariant) *_spanFieldMaskingQuery { + + tmp := &_spanFieldMaskingQuery{v: types.NewSpanFieldMaskingQuery()} + + tmp.Query(query) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanFieldMaskingQuery) Boost(boost float32) *_spanFieldMaskingQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_spanFieldMaskingQuery) Field(field string) *_spanFieldMaskingQuery { + + s.v.Field = field + + return s +} + +func (s *_spanFieldMaskingQuery) Query(query types.SpanQueryVariant) *_spanFieldMaskingQuery { + + s.v.Query = *query.SpanQueryCaster() + + return s +} + +func (s *_spanFieldMaskingQuery) QueryName_(queryname_ string) *_spanFieldMaskingQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanFieldMaskingQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanFieldMasking = s.v + + return container +} + +func (s *_spanFieldMaskingQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanFieldMasking = s.v + + return container +} + +func (s *_spanFieldMaskingQuery) SpanFieldMaskingQueryCaster() *types.SpanFieldMaskingQuery { + return s.v +} diff --git a/typedapi/esdsl/spanfirstquery.go b/typedapi/esdsl/spanfirstquery.go new file mode 100644 index 0000000000..ad596aaced --- /dev/null 
+++ b/typedapi/esdsl/spanfirstquery.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanFirstQuery struct { + v *types.SpanFirstQuery +} + +// Accepts another span query whose matches must appear within the first N +// positions of the field. +func NewSpanFirstQuery(end int, match types.SpanQueryVariant) *_spanFirstQuery { + + tmp := &_spanFirstQuery{v: types.NewSpanFirstQuery()} + + tmp.End(end) + + tmp.Match(match) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanFirstQuery) Boost(boost float32) *_spanFirstQuery { + + s.v.Boost = &boost + + return s +} + +// Controls the maximum end position permitted in a match. 
+func (s *_spanFirstQuery) End(end int) *_spanFirstQuery { + + s.v.End = end + + return s +} + +// Can be any other span type query. +func (s *_spanFirstQuery) Match(match types.SpanQueryVariant) *_spanFirstQuery { + + s.v.Match = *match.SpanQueryCaster() + + return s +} + +func (s *_spanFirstQuery) QueryName_(queryname_ string) *_spanFirstQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanFirstQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanFirst = s.v + + return container +} + +func (s *_spanFirstQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanFirst = s.v + + return container +} + +func (s *_spanFirstQuery) SpanFirstQueryCaster() *types.SpanFirstQuery { + return s.v +} diff --git a/typedapi/esdsl/spangapquery.go b/typedapi/esdsl/spangapquery.go new file mode 100644 index 0000000000..9b26a029e5 --- /dev/null +++ b/typedapi/esdsl/spangapquery.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _spanGapQuery struct { + v types.SpanGapQuery +} + +func NewSpanGapQuery(spangapquery map[string]int) *_spanGapQuery { + return &_spanGapQuery{v: make(map[string]int, 0)} +} + +func (u *_spanGapQuery) SpanGapQueryCaster() *types.SpanGapQuery { + return &u.v +} diff --git a/typedapi/esdsl/spanishanalyzer.go b/typedapi/esdsl/spanishanalyzer.go new file mode 100644 index 0000000000..18d4c0ab26 --- /dev/null +++ b/typedapi/esdsl/spanishanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanishAnalyzer struct { + v *types.SpanishAnalyzer +} + +func NewSpanishAnalyzer() *_spanishAnalyzer { + + return &_spanishAnalyzer{v: types.NewSpanishAnalyzer()} + +} + +func (s *_spanishAnalyzer) StemExclusion(stemexclusions ...string) *_spanishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_spanishAnalyzer) Stopwords(stopwords ...string) *_spanishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_spanishAnalyzer) StopwordsPath(stopwordspath string) *_spanishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_spanishAnalyzer) SpanishAnalyzerCaster() *types.SpanishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/spanmultitermquery.go b/typedapi/esdsl/spanmultitermquery.go new file mode 100644 index 0000000000..400ddaf619 --- /dev/null +++ b/typedapi/esdsl/spanmultitermquery.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanMultiTermQuery struct { + v *types.SpanMultiTermQuery +} + +// Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, +// `range`, or `regexp` query) as a `span` query, so it can be nested. +func NewSpanMultiTermQuery(match types.QueryVariant) *_spanMultiTermQuery { + + tmp := &_spanMultiTermQuery{v: types.NewSpanMultiTermQuery()} + + tmp.Match(match) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanMultiTermQuery) Boost(boost float32) *_spanMultiTermQuery { + + s.v.Boost = &boost + + return s +} + +// Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, +// or `regexp` query). 
+func (s *_spanMultiTermQuery) Match(match types.QueryVariant) *_spanMultiTermQuery { + + s.v.Match = *match.QueryCaster() + + return s +} + +func (s *_spanMultiTermQuery) QueryName_(queryname_ string) *_spanMultiTermQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanMultiTermQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanMulti = s.v + + return container +} + +func (s *_spanMultiTermQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanMulti = s.v + + return container +} + +func (s *_spanMultiTermQuery) SpanMultiTermQueryCaster() *types.SpanMultiTermQuery { + return s.v +} diff --git a/typedapi/esdsl/spannearquery.go b/typedapi/esdsl/spannearquery.go new file mode 100644 index 0000000000..59e2277658 --- /dev/null +++ b/typedapi/esdsl/spannearquery.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanNearQuery struct { + v *types.SpanNearQuery +} + +// Matches spans which are near one another. +// You can specify `slop`, the maximum number of intervening unmatched +// positions, as well as whether matches are required to be in-order. +func NewSpanNearQuery() *_spanNearQuery { + + return &_spanNearQuery{v: types.NewSpanNearQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanNearQuery) Boost(boost float32) *_spanNearQuery { + + s.v.Boost = &boost + + return s +} + +// Array of one or more other span type queries. +func (s *_spanNearQuery) Clauses(clauses ...types.SpanQueryVariant) *_spanNearQuery { + + for _, v := range clauses { + + s.v.Clauses = append(s.v.Clauses, *v.SpanQueryCaster()) + + } + return s +} + +// Controls whether matches are required to be in-order. +func (s *_spanNearQuery) InOrder(inorder bool) *_spanNearQuery { + + s.v.InOrder = &inorder + + return s +} + +func (s *_spanNearQuery) QueryName_(queryname_ string) *_spanNearQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Controls the maximum number of intervening unmatched positions permitted. 
+func (s *_spanNearQuery) Slop(slop int) *_spanNearQuery { + + s.v.Slop = &slop + + return s +} + +func (s *_spanNearQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanNear = s.v + + return container +} + +func (s *_spanNearQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanNear = s.v + + return container +} + +func (s *_spanNearQuery) SpanNearQueryCaster() *types.SpanNearQuery { + return s.v +} diff --git a/typedapi/esdsl/spannotquery.go b/typedapi/esdsl/spannotquery.go new file mode 100644 index 0000000000..6fc164ccbf --- /dev/null +++ b/typedapi/esdsl/spannotquery.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanNotQuery struct { + v *types.SpanNotQuery +} + +// Removes matches which overlap with another span query or which are within x +// tokens before (controlled by the parameter `pre`) or y tokens after +// (controlled by the parameter `post`) another span query. +func NewSpanNotQuery(exclude types.SpanQueryVariant, include types.SpanQueryVariant) *_spanNotQuery { + + tmp := &_spanNotQuery{v: types.NewSpanNotQuery()} + + tmp.Exclude(exclude) + + tmp.Include(include) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanNotQuery) Boost(boost float32) *_spanNotQuery { + + s.v.Boost = &boost + + return s +} + +// The number of tokens from within the include span that can’t have overlap +// with the exclude span. +// Equivalent to setting both `pre` and `post`. +func (s *_spanNotQuery) Dist(dist int) *_spanNotQuery { + + s.v.Dist = &dist + + return s +} + +// Span query whose matches must not overlap those returned. +func (s *_spanNotQuery) Exclude(exclude types.SpanQueryVariant) *_spanNotQuery { + + s.v.Exclude = *exclude.SpanQueryCaster() + + return s +} + +// Span query whose matches are filtered. +func (s *_spanNotQuery) Include(include types.SpanQueryVariant) *_spanNotQuery { + + s.v.Include = *include.SpanQueryCaster() + + return s +} + +// The number of tokens after the include span that can’t have overlap with the +// exclude span. 
+func (s *_spanNotQuery) Post(post int) *_spanNotQuery { + + s.v.Post = &post + + return s +} + +// The number of tokens before the include span that can’t have overlap with the +// exclude span. +func (s *_spanNotQuery) Pre(pre int) *_spanNotQuery { + + s.v.Pre = &pre + + return s +} + +func (s *_spanNotQuery) QueryName_(queryname_ string) *_spanNotQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanNotQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanNot = s.v + + return container +} + +func (s *_spanNotQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanNot = s.v + + return container +} + +func (s *_spanNotQuery) SpanNotQueryCaster() *types.SpanNotQuery { + return s.v +} diff --git a/typedapi/esdsl/spanorquery.go b/typedapi/esdsl/spanorquery.go new file mode 100644 index 0000000000..13e33b636b --- /dev/null +++ b/typedapi/esdsl/spanorquery.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanOrQuery struct { + v *types.SpanOrQuery +} + +// Matches the union of its span clauses. +func NewSpanOrQuery() *_spanOrQuery { + + return &_spanOrQuery{v: types.NewSpanOrQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanOrQuery) Boost(boost float32) *_spanOrQuery { + + s.v.Boost = &boost + + return s +} + +// Array of one or more other span type queries. +func (s *_spanOrQuery) Clauses(clauses ...types.SpanQueryVariant) *_spanOrQuery { + + for _, v := range clauses { + + s.v.Clauses = append(s.v.Clauses, *v.SpanQueryCaster()) + + } + return s +} + +func (s *_spanOrQuery) QueryName_(queryname_ string) *_spanOrQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanOrQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanOr = s.v + + return container +} + +func (s *_spanOrQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanOr = s.v + + return container +} + +func (s *_spanOrQuery) SpanOrQueryCaster() *types.SpanOrQuery { + return s.v +} diff --git a/typedapi/esdsl/spanquery.go b/typedapi/esdsl/spanquery.go new file mode 100644 index 0000000000..08d7d1bf4a --- /dev/null +++ b/typedapi/esdsl/spanquery.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _spanQuery struct { + v *types.SpanQuery +} + +func NewSpanQuery() *_spanQuery { + return &_spanQuery{v: types.NewSpanQuery()} +} + +// AdditionalSpanQueryProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_spanQuery) AdditionalSpanQueryProperty(key string, value json.RawMessage) *_spanQuery { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalSpanQueryProperty = tmp + return s +} + +// Accepts a list of span queries, but only returns those spans which also match +// a second span query. +func (s *_spanQuery) SpanContaining(spancontaining types.SpanContainingQueryVariant) *_spanQuery { + + s.v.SpanContaining = spancontaining.SpanContainingQueryCaster() + + return s +} + +// Allows queries like `span_near` or `span_or` across different fields. 
+func (s *_spanQuery) SpanFieldMasking(spanfieldmasking types.SpanFieldMaskingQueryVariant) *_spanQuery { + + s.v.SpanFieldMasking = spanfieldmasking.SpanFieldMaskingQueryCaster() + + return s +} + +// Accepts another span query whose matches must appear within the first N +// positions of the field. +func (s *_spanQuery) SpanFirst(spanfirst types.SpanFirstQueryVariant) *_spanQuery { + + s.v.SpanFirst = spanfirst.SpanFirstQueryCaster() + + return s +} + +func (s *_spanQuery) SpanGap(spangapquery types.SpanGapQueryVariant) *_spanQuery { + + s.v.SpanGap = *spangapquery.SpanGapQueryCaster() + + return s +} + +// Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. +func (s *_spanQuery) SpanMulti(spanmulti types.SpanMultiTermQueryVariant) *_spanQuery { + + s.v.SpanMulti = spanmulti.SpanMultiTermQueryCaster() + + return s +} + +// Accepts multiple span queries whose matches must be within the specified +// distance of each other, and possibly in the same order. +func (s *_spanQuery) SpanNear(spannear types.SpanNearQueryVariant) *_spanQuery { + + s.v.SpanNear = spannear.SpanNearQueryCaster() + + return s +} + +// Wraps another span query, and excludes any documents which match that query. +func (s *_spanQuery) SpanNot(spannot types.SpanNotQueryVariant) *_spanQuery { + + s.v.SpanNot = spannot.SpanNotQueryCaster() + + return s +} + +// Combines multiple span queries and returns documents which match any of the +// specified queries. +func (s *_spanQuery) SpanOr(spanor types.SpanOrQueryVariant) *_spanQuery { + + s.v.SpanOr = spanor.SpanOrQueryCaster() + + return s +} + +// The equivalent of the `term` query but for use with other span queries. +// SpanTerm is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_spanQuery) SpanTerm(key string, value types.SpanTermQueryVariant) *_spanQuery { + + tmp := make(map[string]types.SpanTermQuery) + + tmp[key] = *value.SpanTermQueryCaster() + + s.v.SpanTerm = tmp + return s +} + +// The result from a single span query is returned as long is its span falls +// within the spans returned by a list of other span queries. +func (s *_spanQuery) SpanWithin(spanwithin types.SpanWithinQueryVariant) *_spanQuery { + + s.v.SpanWithin = spanwithin.SpanWithinQueryCaster() + + return s +} + +func (s *_spanQuery) SpanQueryCaster() *types.SpanQuery { + return s.v +} diff --git a/typedapi/esdsl/spantermquery.go b/typedapi/esdsl/spantermquery.go new file mode 100644 index 0000000000..eedf373ac9 --- /dev/null +++ b/typedapi/esdsl/spantermquery.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanTermQuery struct { + k string + v *types.SpanTermQuery +} + +// Matches spans containing a term. 
+func NewSpanTermQuery(field string, value types.FieldValueVariant) *_spanTermQuery { + tmp := &_spanTermQuery{ + k: field, + v: types.NewSpanTermQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanTermQuery) Boost(boost float32) *_spanTermQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_spanTermQuery) QueryName_(queryname_ string) *_spanTermQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanTermQuery) Value(fieldvalue types.FieldValueVariant) *_spanTermQuery { + + s.v.Value = *fieldvalue.FieldValueCaster() + + return s +} + +func (s *_spanTermQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.SpanTerm = map[string]types.SpanTermQuery{ + s.k: *s.v, + } + return container +} + +func (s *_spanTermQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + container.SpanTerm = map[string]types.SpanTermQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleSpanTermQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleSpanTermQuery() *_spanTermQuery { + return &_spanTermQuery{ + k: "", + v: types.NewSpanTermQuery(), + } +} + +func (s *_spanTermQuery) SpanTermQueryCaster() *types.SpanTermQuery { + return s.v.SpanTermQueryCaster() +} diff --git a/typedapi/esdsl/spanwithinquery.go b/typedapi/esdsl/spanwithinquery.go new file mode 100644 index 0000000000..f4d73782c3 --- /dev/null +++ b/typedapi/esdsl/spanwithinquery.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _spanWithinQuery struct { + v *types.SpanWithinQuery +} + +// Returns matches which are enclosed inside another span query. +func NewSpanWithinQuery(big types.SpanQueryVariant, little types.SpanQueryVariant) *_spanWithinQuery { + + tmp := &_spanWithinQuery{v: types.NewSpanWithinQuery()} + + tmp.Big(big) + + tmp.Little(little) + + return tmp + +} + +// Can be any span query. +// Matching spans from `little` that are enclosed within `big` are returned. +func (s *_spanWithinQuery) Big(big types.SpanQueryVariant) *_spanWithinQuery { + + s.v.Big = *big.SpanQueryCaster() + + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_spanWithinQuery) Boost(boost float32) *_spanWithinQuery { + + s.v.Boost = &boost + + return s +} + +// Can be any span query. 
+// Matching spans from `little` that are enclosed within `big` are returned. +func (s *_spanWithinQuery) Little(little types.SpanQueryVariant) *_spanWithinQuery { + + s.v.Little = *little.SpanQueryCaster() + + return s +} + +func (s *_spanWithinQuery) QueryName_(queryname_ string) *_spanWithinQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_spanWithinQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.SpanWithin = s.v + + return container +} + +func (s *_spanWithinQuery) SpanQueryCaster() *types.SpanQuery { + container := types.NewSpanQuery() + + container.SpanWithin = s.v + + return container +} + +func (s *_spanWithinQuery) SpanWithinQueryCaster() *types.SpanWithinQuery { + return s.v +} diff --git a/typedapi/esdsl/sparsevectorproperty.go b/typedapi/esdsl/sparsevectorproperty.go new file mode 100644 index 0000000000..b83967a6c9 --- /dev/null +++ b/typedapi/esdsl/sparsevectorproperty.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _sparseVectorProperty struct { + v *types.SparseVectorProperty +} + +func NewSparseVectorProperty() *_sparseVectorProperty { + + return &_sparseVectorProperty{v: types.NewSparseVectorProperty()} + +} + +func (s *_sparseVectorProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_sparseVectorProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_sparseVectorProperty) Fields(fields map[string]types.Property) *_sparseVectorProperty { + + s.v.Fields = fields + return s +} + +func (s *_sparseVectorProperty) AddField(key string, value types.PropertyVariant) *_sparseVectorProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_sparseVectorProperty) IgnoreAbove(ignoreabove int) *_sparseVectorProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_sparseVectorProperty) Meta(meta map[string]string) *_sparseVectorProperty { + + s.v.Meta = meta + return s +} + +func (s *_sparseVectorProperty) AddMeta(key string, value string) *_sparseVectorProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_sparseVectorProperty) Properties(properties map[string]types.Property) *_sparseVectorProperty { + + s.v.Properties = properties + return s +} + +func (s *_sparseVectorProperty) AddProperty(key string, value types.PropertyVariant) *_sparseVectorProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_sparseVectorProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_sparseVectorProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_sparseVectorProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_sparseVectorProperty) SparseVectorPropertyCaster() *types.SparseVectorProperty { + return s.v +} diff --git a/typedapi/esdsl/sparsevectorquery.go b/typedapi/esdsl/sparsevectorquery.go new file mode 100644 index 0000000000..72e807f378 --- /dev/null +++ b/typedapi/esdsl/sparsevectorquery.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _sparseVectorQuery struct { + v *types.SparseVectorQuery +} + +func NewSparseVectorQuery() *_sparseVectorQuery { + return &_sparseVectorQuery{v: types.NewSparseVectorQuery()} +} + +// AdditionalSparseVectorQueryProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_sparseVectorQuery) AdditionalSparseVectorQueryProperty(key string, value json.RawMessage) *_sparseVectorQuery { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalSparseVectorQueryProperty = tmp + return s +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_sparseVectorQuery) Boost(boost float32) *_sparseVectorQuery { + + s.v.Boost = &boost + + return s +} + +// The name of the field that contains the token-weight pairs to be searched +// against. +// This field must be a mapped sparse_vector field. 
+func (s *_sparseVectorQuery) Field(field string) *_sparseVectorQuery { + + s.v.Field = field + + return s +} + +// The inference ID to use to convert the query text into token-weight pairs. +// It must be the same inference ID that was used to create the tokens from the +// input text. +// Only one of inference_id and query_vector is allowed. +// If inference_id is specified, query must also be specified. +// Only one of inference_id or query_vector may be supplied in a request. +func (s *_sparseVectorQuery) InferenceId(id string) *_sparseVectorQuery { + + s.v.InferenceId = &id + + return s +} + +// Whether to perform pruning, omitting the non-significant tokens from the +// query to improve query performance. +// If prune is true but the pruning_config is not specified, pruning will occur +// but default values will be used. +// Default: false +func (s *_sparseVectorQuery) Prune(prune bool) *_sparseVectorQuery { + + s.v.Prune = &prune + + return s +} + +// Optional pruning configuration. +// If enabled, this will omit non-significant tokens from the query in order to +// improve query performance. +// This is only used if prune is set to true. +// If prune is set to true but pruning_config is not specified, default values +// will be used. +func (s *_sparseVectorQuery) PruningConfig(pruningconfig types.TokenPruningConfigVariant) *_sparseVectorQuery { + + s.v.PruningConfig = pruningconfig.TokenPruningConfigCaster() + + return s +} + +// The query text you want to use for search. +// If inference_id is specified, query must also be specified. +func (s *_sparseVectorQuery) Query(query string) *_sparseVectorQuery { + + s.v.Query = &query + + return s +} + +func (s *_sparseVectorQuery) QueryName_(queryname_ string) *_sparseVectorQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Dictionary of precomputed sparse vectors and their associated weights. +// Only one of inference_id or query_vector may be supplied in a request. 
+func (s *_sparseVectorQuery) QueryVector(queryvector map[string]float32) *_sparseVectorQuery { + + s.v.QueryVector = queryvector + return s +} + +func (s *_sparseVectorQuery) AddQueryVector(key string, value float32) *_sparseVectorQuery { + + var tmp map[string]float32 + if s.v.QueryVector == nil { + s.v.QueryVector = make(map[string]float32) + } else { + tmp = s.v.QueryVector + } + + tmp[key] = value + + s.v.QueryVector = tmp + return s +} + +func (s *_sparseVectorQuery) SparseVectorQueryCaster() *types.SparseVectorQuery { + return s.v +} diff --git a/typedapi/esdsl/splitprocessor.go b/typedapi/esdsl/splitprocessor.go new file mode 100644 index 0000000000..264b48984d --- /dev/null +++ b/typedapi/esdsl/splitprocessor.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _splitProcessor struct { + v *types.SplitProcessor +} + +// Splits a field into an array using a separator character. +// Only works on string fields. 
+func NewSplitProcessor(separator string) *_splitProcessor { + + tmp := &_splitProcessor{v: types.NewSplitProcessor()} + + tmp.Separator(separator) + + return tmp + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_splitProcessor) Description(description string) *_splitProcessor { + + s.v.Description = &description + + return s +} + +// The field to split. +func (s *_splitProcessor) Field(field string) *_splitProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_splitProcessor) If(if_ types.ScriptVariant) *_splitProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_splitProcessor) IgnoreFailure(ignorefailure bool) *_splitProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_splitProcessor) IgnoreMissing(ignoremissing bool) *_splitProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_splitProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_splitProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Preserves empty trailing fields, if any. +func (s *_splitProcessor) PreserveTrailing(preservetrailing bool) *_splitProcessor { + + s.v.PreserveTrailing = &preservetrailing + + return s +} + +// A regex which matches the separator, for example, `,` or `\s+`. +func (s *_splitProcessor) Separator(separator string) *_splitProcessor { + + s.v.Separator = separator + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_splitProcessor) Tag(tag string) *_splitProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the split value to. 
+// By default, the field is updated in-place. +func (s *_splitProcessor) TargetField(field string) *_splitProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_splitProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Split = s.v + + return container +} + +func (s *_splitProcessor) SplitProcessorCaster() *types.SplitProcessor { + return s.v +} diff --git a/typedapi/esdsl/standardanalyzer.go b/typedapi/esdsl/standardanalyzer.go new file mode 100644 index 0000000000..e96cb20c85 --- /dev/null +++ b/typedapi/esdsl/standardanalyzer.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _standardAnalyzer struct { + v *types.StandardAnalyzer +} + +func NewStandardAnalyzer() *_standardAnalyzer { + + return &_standardAnalyzer{v: types.NewStandardAnalyzer()} + +} + +// The maximum token length. 
If a token is seen that exceeds this length then it +// is split at `max_token_length` intervals. +// Defaults to `255`. +func (s *_standardAnalyzer) MaxTokenLength(maxtokenlength int) *_standardAnalyzer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +// A pre-defined stop words list like `_english_` or an array containing a list +// of stop words. +// Defaults to `_none_`. +func (s *_standardAnalyzer) Stopwords(stopwords ...string) *_standardAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +// The path to a file containing stop words. +func (s *_standardAnalyzer) StopwordsPath(stopwordspath string) *_standardAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_standardAnalyzer) StandardAnalyzerCaster() *types.StandardAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/standardretriever.go b/typedapi/esdsl/standardretriever.go new file mode 100644 index 0000000000..67e7f21535 --- /dev/null +++ b/typedapi/esdsl/standardretriever.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _standardRetriever struct { + v *types.StandardRetriever +} + +// A retriever that replaces the functionality of a traditional query. +func NewStandardRetriever() *_standardRetriever { + + return &_standardRetriever{v: types.NewStandardRetriever()} + +} + +// Collapses the top documents by a specified key into a single top document per +// key. +func (s *_standardRetriever) Collapse(collapse types.FieldCollapseVariant) *_standardRetriever { + + s.v.Collapse = collapse.FieldCollapseCaster() + + return s +} + +// Query to filter the documents that can match. +func (s *_standardRetriever) Filter(filters ...types.QueryVariant) *_standardRetriever { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are not +// included in the top documents. +func (s *_standardRetriever) MinScore(minscore float32) *_standardRetriever { + + s.v.MinScore = &minscore + + return s +} + +// Defines a query to retrieve a set of top documents. +func (s *_standardRetriever) Query(query types.QueryVariant) *_standardRetriever { + + s.v.Query = query.QueryCaster() + + return s +} + +// Defines a search after object parameter used for pagination. +func (s *_standardRetriever) SearchAfter(sortresults ...types.FieldValueVariant) *_standardRetriever { + + for _, v := range sortresults { + s.v.SearchAfter = append(s.v.SearchAfter, *v.FieldValueCaster()) + } + + return s +} + +// A sort object that specifies the order of matching documents.
+func (s *_standardRetriever) Sort(sorts ...types.SortCombinationsVariant) *_standardRetriever { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +// Maximum number of documents to collect for each shard. +func (s *_standardRetriever) TerminateAfter(terminateafter int) *_standardRetriever { + + s.v.TerminateAfter = &terminateafter + + return s +} + +func (s *_standardRetriever) RetrieverContainerCaster() *types.RetrieverContainer { + container := types.NewRetrieverContainer() + + container.Standard = s.v + + return container +} + +func (s *_standardRetriever) StandardRetrieverCaster() *types.StandardRetriever { + return s.v +} diff --git a/typedapi/esdsl/standardtokenizer.go b/typedapi/esdsl/standardtokenizer.go new file mode 100644 index 0000000000..f88766724e --- /dev/null +++ b/typedapi/esdsl/standardtokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _standardTokenizer struct { + v *types.StandardTokenizer +} + +func NewStandardTokenizer() *_standardTokenizer { + + return &_standardTokenizer{v: types.NewStandardTokenizer()} + +} + +func (s *_standardTokenizer) MaxTokenLength(maxtokenlength int) *_standardTokenizer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +func (s *_standardTokenizer) Version(versionstring string) *_standardTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_standardTokenizer) StandardTokenizerCaster() *types.StandardTokenizer { + return s.v +} diff --git a/typedapi/esdsl/statsaggregation.go b/typedapi/esdsl/statsaggregation.go new file mode 100644 index 0000000000..8f783dc237 --- /dev/null +++ b/typedapi/esdsl/statsaggregation.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _statsAggregation struct { + v *types.StatsAggregation +} + +// A multi-value metrics aggregation that computes stats over numeric values +// extracted from the aggregated documents. +func NewStatsAggregation() *_statsAggregation { + + return &_statsAggregation{v: types.NewStatsAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_statsAggregation) Field(field string) *_statsAggregation { + + s.v.Field = &field + + return s +} + +func (s *_statsAggregation) Format(format string) *_statsAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_statsAggregation) Missing(missing types.MissingVariant) *_statsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_statsAggregation) Script(script types.ScriptVariant) *_statsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_statsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Stats = s.v + + return container +} + +func (s *_statsAggregation) StatsAggregationCaster() *types.StatsAggregation { + return s.v +} diff --git a/typedapi/esdsl/statsbucketaggregation.go b/typedapi/esdsl/statsbucketaggregation.go new file mode 100644 index 0000000000..d2f9f85a44 --- /dev/null +++ b/typedapi/esdsl/statsbucketaggregation.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _statsBucketAggregation struct { + v *types.StatsBucketAggregation +} + +// A sibling pipeline aggregation which calculates a variety of stats across all +// bucket of a specified metric in a sibling aggregation. +func NewStatsBucketAggregation() *_statsBucketAggregation { + + return &_statsBucketAggregation{v: types.NewStatsBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_statsBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_statsBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_statsBucketAggregation) Format(format string) *_statsBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. 
+func (s *_statsBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_statsBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_statsBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.StatsBucket = s.v + + return container +} + +func (s *_statsBucketAggregation) StatsBucketAggregationCaster() *types.StatsBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/stemmeroverridetokenfilter.go b/typedapi/esdsl/stemmeroverridetokenfilter.go new file mode 100644 index 0000000000..147eb4a0e8 --- /dev/null +++ b/typedapi/esdsl/stemmeroverridetokenfilter.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stemmerOverrideTokenFilter struct { + v *types.StemmerOverrideTokenFilter +} + +func NewStemmerOverrideTokenFilter() *_stemmerOverrideTokenFilter { + + return &_stemmerOverrideTokenFilter{v: types.NewStemmerOverrideTokenFilter()} + +} + +func (s *_stemmerOverrideTokenFilter) Rules(rules ...string) *_stemmerOverrideTokenFilter { + + for _, v := range rules { + + s.v.Rules = append(s.v.Rules, v) + + } + return s +} + +func (s *_stemmerOverrideTokenFilter) RulesPath(rulespath string) *_stemmerOverrideTokenFilter { + + s.v.RulesPath = &rulespath + + return s +} + +func (s *_stemmerOverrideTokenFilter) Version(versionstring string) *_stemmerOverrideTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_stemmerOverrideTokenFilter) StemmerOverrideTokenFilterCaster() *types.StemmerOverrideTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/stemmertokenfilter.go b/typedapi/esdsl/stemmertokenfilter.go new file mode 100644 index 0000000000..efe3d901bf --- /dev/null +++ b/typedapi/esdsl/stemmertokenfilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stemmerTokenFilter struct { + v *types.StemmerTokenFilter +} + +func NewStemmerTokenFilter() *_stemmerTokenFilter { + + return &_stemmerTokenFilter{v: types.NewStemmerTokenFilter()} + +} + +func (s *_stemmerTokenFilter) Language(language string) *_stemmerTokenFilter { + + s.v.Language = &language + + return s +} + +func (s *_stemmerTokenFilter) Version(versionstring string) *_stemmerTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_stemmerTokenFilter) StemmerTokenFilterCaster() *types.StemmerTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/stepkey.go b/typedapi/esdsl/stepkey.go new file mode 100644 index 0000000000..39543c623f --- /dev/null +++ b/typedapi/esdsl/stepkey.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stepKey struct { + v *types.StepKey +} + +func NewStepKey(phase string) *_stepKey { + + tmp := &_stepKey{v: types.NewStepKey()} + + tmp.Phase(phase) + + return tmp + +} + +// The optional action to which the index will be moved. +func (s *_stepKey) Action(action string) *_stepKey { + + s.v.Action = &action + + return s +} + +// The optional step name to which the index will be moved. +func (s *_stepKey) Name(name string) *_stepKey { + + s.v.Name = &name + + return s +} + +func (s *_stepKey) Phase(phase string) *_stepKey { + + s.v.Phase = phase + + return s +} + +func (s *_stepKey) StepKeyCaster() *types.StepKey { + return s.v +} diff --git a/typedapi/esdsl/stopanalyzer.go b/typedapi/esdsl/stopanalyzer.go new file mode 100644 index 0000000000..3964b39153 --- /dev/null +++ b/typedapi/esdsl/stopanalyzer.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stopAnalyzer struct { + v *types.StopAnalyzer +} + +func NewStopAnalyzer() *_stopAnalyzer { + + return &_stopAnalyzer{v: types.NewStopAnalyzer()} + +} + +// A pre-defined stop words list like `_english_` or an array containing a list +// of stop words. +// Defaults to `_none_`. +func (s *_stopAnalyzer) Stopwords(stopwords ...string) *_stopAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +// The path to a file containing stop words. +func (s *_stopAnalyzer) StopwordsPath(stopwordspath string) *_stopAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_stopAnalyzer) Version(versionstring string) *_stopAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_stopAnalyzer) StopAnalyzerCaster() *types.StopAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/stoptokenfilter.go b/typedapi/esdsl/stoptokenfilter.go new file mode 100644 index 0000000000..f8836be96c --- /dev/null +++ b/typedapi/esdsl/stoptokenfilter.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stopTokenFilter struct { + v *types.StopTokenFilter +} + +func NewStopTokenFilter() *_stopTokenFilter { + + return &_stopTokenFilter{v: types.NewStopTokenFilter()} + +} + +func (s *_stopTokenFilter) IgnoreCase(ignorecase bool) *_stopTokenFilter { + + s.v.IgnoreCase = &ignorecase + + return s +} + +func (s *_stopTokenFilter) RemoveTrailing(removetrailing bool) *_stopTokenFilter { + + s.v.RemoveTrailing = &removetrailing + + return s +} + +func (s *_stopTokenFilter) Stopwords(stopwords ...string) *_stopTokenFilter { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_stopTokenFilter) StopwordsPath(stopwordspath string) *_stopTokenFilter { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_stopTokenFilter) Version(versionstring string) *_stopTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_stopTokenFilter) StopTokenFilterCaster() *types.StopTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/stopwords.go b/typedapi/esdsl/stopwords.go new file mode 100644 index 0000000000..b424bac753 --- /dev/null +++ b/typedapi/esdsl/stopwords.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _stopWords struct { + v types.StopWords +} + +func NewStopWords() *_stopWords { + return &_stopWords{v: nil} +} + +func (u *_stopWords) Strings(strings ...string) *_stopWords { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_stopWords) StopWordsCaster() *types.StopWords { + return &u.v +} diff --git a/typedapi/esdsl/storage.go b/typedapi/esdsl/storage.go new file mode 100644 index 0000000000..20c204ba74 --- /dev/null +++ b/typedapi/esdsl/storage.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype" +) + +type _storage struct { + v *types.Storage +} + +func NewStorage(type_ storagetype.StorageType) *_storage { + + tmp := &_storage{v: types.NewStorage()} + + tmp.Type(type_) + + return tmp + +} + +// You can restrict the use of the mmapfs and the related hybridfs store type +// via the setting node.store.allow_mmap. +// This is a boolean setting indicating whether or not memory-mapping is +// allowed. The default is to allow it. This +// setting is useful, for example, if you are in an environment where you can +// not control the ability to create a lot +// of memory maps so you need disable the ability to use memory-mapping. +func (s *_storage) AllowMmap(allowmmap bool) *_storage { + + s.v.AllowMmap = &allowmmap + + return s +} + +func (s *_storage) Type(type_ storagetype.StorageType) *_storage { + + s.v.Type = type_ + return s +} + +func (s *_storage) StorageCaster() *types.Storage { + return s.v +} diff --git a/typedapi/esdsl/storedscript.go b/typedapi/esdsl/storedscript.go new file mode 100644 index 0000000000..b939e15082 --- /dev/null +++ b/typedapi/esdsl/storedscript.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" +) + +type _storedScript struct { + v *types.StoredScript +} + +func NewStoredScript(lang scriptlanguage.ScriptLanguage, source string) *_storedScript { + + tmp := &_storedScript{v: types.NewStoredScript()} + + tmp.Lang(lang) + + tmp.Source(source) + + return tmp + +} + +// The language the script is written in. +// For search templates, use `mustache`. +func (s *_storedScript) Lang(lang scriptlanguage.ScriptLanguage) *_storedScript { + + s.v.Lang = lang + return s +} + +func (s *_storedScript) Options(options map[string]string) *_storedScript { + + s.v.Options = options + return s +} + +func (s *_storedScript) AddOption(key string, value string) *_storedScript { + + var tmp map[string]string + if s.v.Options == nil { + s.v.Options = make(map[string]string) + } + // tmp must alias the (now non-nil) map: writing through a nil tmp would panic. + tmp = s.v.Options + + tmp[key] = value + + s.v.Options = tmp + return s +} + +// The script source. +// For search templates, an object containing the search template.
+func (s *_storedScript) Source(source string) *_storedScript { + + s.v.Source = source + + return s +} + +func (s *_storedScript) StoredScriptCaster() *types.StoredScript { + return s.v +} diff --git a/typedapi/esdsl/stringifiedboolean.go b/typedapi/esdsl/stringifiedboolean.go new file mode 100644 index 0000000000..3bfd6a5e87 --- /dev/null +++ b/typedapi/esdsl/stringifiedboolean.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _stringifiedboolean struct { + v types.Stringifiedboolean +} + +func NewStringifiedboolean() *_stringifiedboolean { + return &_stringifiedboolean{v: nil} +} + +func (u *_stringifiedboolean) Bool(bool bool) *_stringifiedboolean { + + u.v = &bool + + return u +} + +func (u *_stringifiedboolean) String(string string) *_stringifiedboolean { + + u.v = &string + + return u +} + +func (u *_stringifiedboolean) StringifiedbooleanCaster() *types.Stringifiedboolean { + return &u.v +} diff --git a/typedapi/esdsl/stringifiedepochtimeunitmillis.go b/typedapi/esdsl/stringifiedepochtimeunitmillis.go new file mode 100644 index 0000000000..633d5f03fe --- /dev/null +++ b/typedapi/esdsl/stringifiedepochtimeunitmillis.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _stringifiedEpochTimeUnitMillis struct { + v types.StringifiedEpochTimeUnitMillis +} + +func NewStringifiedEpochTimeUnitMillis() *_stringifiedEpochTimeUnitMillis { + return &_stringifiedEpochTimeUnitMillis{v: nil} +} + +func (u *_stringifiedEpochTimeUnitMillis) EpochTimeUnitMillis(epochtimeunitmillis int64) *_stringifiedEpochTimeUnitMillis { + + u.v = &epochtimeunitmillis + + return u +} + +func (u *_stringifiedEpochTimeUnitMillis) String(string string) *_stringifiedEpochTimeUnitMillis { + + u.v = &string + + return u +} + +func (u *_stringifiedEpochTimeUnitMillis) StringifiedEpochTimeUnitMillisCaster() *types.StringifiedEpochTimeUnitMillis { + return &u.v +} diff --git a/typedapi/esdsl/stringifiedinteger.go b/typedapi/esdsl/stringifiedinteger.go new file mode 100644 index 0000000000..90ec165d8b --- /dev/null +++ b/typedapi/esdsl/stringifiedinteger.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _stringifiedinteger struct { + v types.Stringifiedinteger +} + +func NewStringifiedinteger() *_stringifiedinteger { + return &_stringifiedinteger{v: nil} +} + +func (u *_stringifiedinteger) Int(int int) *_stringifiedinteger { + + u.v = &int + + return u +} + +func (u *_stringifiedinteger) String(string string) *_stringifiedinteger { + + u.v = &string + + return u +} + +func (u *_stringifiedinteger) StringifiedintegerCaster() *types.Stringifiedinteger { + return &u.v +} diff --git a/typedapi/esdsl/stringstatsaggregation.go b/typedapi/esdsl/stringstatsaggregation.go new file mode 100644 index 0000000000..8600e47aa3 --- /dev/null +++ b/typedapi/esdsl/stringstatsaggregation.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stringStatsAggregation struct { + v *types.StringStatsAggregation +} + +// A multi-value metrics aggregation that computes statistics over string values +// extracted from the aggregated documents. +func NewStringStatsAggregation() *_stringStatsAggregation { + + return &_stringStatsAggregation{v: types.NewStringStatsAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_stringStatsAggregation) Field(field string) *_stringStatsAggregation { + + s.v.Field = &field + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_stringStatsAggregation) Missing(missing types.MissingVariant) *_stringStatsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_stringStatsAggregation) Script(script types.ScriptVariant) *_stringStatsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Shows the probability distribution for all characters. +func (s *_stringStatsAggregation) ShowDistribution(showdistribution bool) *_stringStatsAggregation { + + s.v.ShowDistribution = &showdistribution + + return s +} + +func (s *_stringStatsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.StringStats = s.v + + return container +} + +func (s *_stringStatsAggregation) StringStatsAggregationCaster() *types.StringStatsAggregation { + return s.v +} diff --git a/typedapi/esdsl/stupidbackoffsmoothingmodel.go b/typedapi/esdsl/stupidbackoffsmoothingmodel.go new file mode 100644 index 0000000000..4079625c8a --- /dev/null +++ b/typedapi/esdsl/stupidbackoffsmoothingmodel.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _stupidBackoffSmoothingModel struct { + v *types.StupidBackoffSmoothingModel +} + +// A simple backoff model that backs off to lower order n-gram models if the +// higher order count is `0` and discounts the lower order n-gram model by a +// constant factor. +func NewStupidBackoffSmoothingModel(discount types.Float64) *_stupidBackoffSmoothingModel { + + tmp := &_stupidBackoffSmoothingModel{v: types.NewStupidBackoffSmoothingModel()} + + tmp.Discount(discount) + + return tmp + +} + +// A constant factor that the lower order n-gram model is discounted by. 
+func (s *_stupidBackoffSmoothingModel) Discount(discount types.Float64) *_stupidBackoffSmoothingModel { + + s.v.Discount = discount + + return s +} + +func (s *_stupidBackoffSmoothingModel) SmoothingModelContainerCaster() *types.SmoothingModelContainer { + container := types.NewSmoothingModelContainer() + + container.StupidBackoff = s.v + + return container +} + +func (s *_stupidBackoffSmoothingModel) StupidBackoffSmoothingModelCaster() *types.StupidBackoffSmoothingModel { + return s.v +} diff --git a/typedapi/esdsl/suggestcontext.go b/typedapi/esdsl/suggestcontext.go new file mode 100644 index 0000000000..15a0b2d76f --- /dev/null +++ b/typedapi/esdsl/suggestcontext.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _suggestContext struct { + v *types.SuggestContext +} + +func NewSuggestContext(type_ string) *_suggestContext { + + tmp := &_suggestContext{v: types.NewSuggestContext()} + + tmp.Type(type_) + + return tmp + +} + +func (s *_suggestContext) Name(name string) *_suggestContext { + + s.v.Name = name + + return s +} + +func (s *_suggestContext) Path(field string) *_suggestContext { + + s.v.Path = &field + + return s +} + +func (s *_suggestContext) Precision(precision string) *_suggestContext { + + s.v.Precision = precision + + return s +} + +func (s *_suggestContext) Type(type_ string) *_suggestContext { + + s.v.Type = type_ + + return s +} + +func (s *_suggestContext) SuggestContextCaster() *types.SuggestContext { + return s.v +} diff --git a/typedapi/esdsl/suggester.go b/typedapi/esdsl/suggester.go new file mode 100644 index 0000000000..0bb3598396 --- /dev/null +++ b/typedapi/esdsl/suggester.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _suggester struct { + v *types.Suggester +} + +func NewSuggester() *_suggester { + + return &_suggester{v: types.NewSuggester()} + +} + +func (s *_suggester) Suggesters(suggesters map[string]types.FieldSuggester) *_suggester { + + s.v.Suggesters = suggesters + return s +} + +func (s *_suggester) AddSuggester(key string, value types.FieldSuggesterVariant) *_suggester { + + var tmp map[string]types.FieldSuggester + if s.v.Suggesters == nil { + s.v.Suggesters = make(map[string]types.FieldSuggester) + } else { + tmp = s.v.Suggesters + } + + tmp[key] = *value.FieldSuggesterCaster() + + s.v.Suggesters = tmp + return s +} + +// Global suggest text, to avoid repetition when the same text is used in +// several suggesters +func (s *_suggester) Text(text string) *_suggester { + + s.v.Text = &text + + return s +} + +func (s *_suggester) SuggesterCaster() *types.Suggester { + return s.v +} diff --git a/typedapi/esdsl/suggestfuzziness.go b/typedapi/esdsl/suggestfuzziness.go new file mode 100644 index 0000000000..f50bbaaf5e --- /dev/null +++ b/typedapi/esdsl/suggestfuzziness.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _suggestFuzziness struct { + v *types.SuggestFuzziness +} + +func NewSuggestFuzziness() *_suggestFuzziness { + + return &_suggestFuzziness{v: types.NewSuggestFuzziness()} + +} + +// The fuzziness factor. +func (s *_suggestFuzziness) Fuzziness(fuzziness types.FuzzinessVariant) *_suggestFuzziness { + + s.v.Fuzziness = *fuzziness.FuzzinessCaster() + + return s +} + +// Minimum length of the input before fuzzy suggestions are returned. +func (s *_suggestFuzziness) MinLength(minlength int) *_suggestFuzziness { + + s.v.MinLength = &minlength + + return s +} + +// Minimum length of the input, which is not checked for fuzzy alternatives. +func (s *_suggestFuzziness) PrefixLength(prefixlength int) *_suggestFuzziness { + + s.v.PrefixLength = &prefixlength + + return s +} + +// If set to `true`, transpositions are counted as one change instead of two. +func (s *_suggestFuzziness) Transpositions(transpositions bool) *_suggestFuzziness { + + s.v.Transpositions = &transpositions + + return s +} + +// If `true`, all measurements (like fuzzy edit distance, transpositions, and +// lengths) are measured in Unicode code points instead of in bytes. +// This is slightly slower than raw bytes. 
+func (s *_suggestFuzziness) UnicodeAware(unicodeaware bool) *_suggestFuzziness { + + s.v.UnicodeAware = &unicodeaware + + return s +} + +func (s *_suggestFuzziness) SuggestFuzzinessCaster() *types.SuggestFuzziness { + return s.v +} diff --git a/typedapi/esdsl/sumaggregation.go b/typedapi/esdsl/sumaggregation.go new file mode 100644 index 0000000000..f39416c643 --- /dev/null +++ b/typedapi/esdsl/sumaggregation.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _sumAggregation struct { + v *types.SumAggregation +} + +// A single-value metrics aggregation that sums numeric values that are +// extracted from the aggregated documents. +func NewSumAggregation() *_sumAggregation { + + return &_sumAggregation{v: types.NewSumAggregation()} + +} + +// The field on which to run the aggregation. 
+func (s *_sumAggregation) Field(field string) *_sumAggregation { + + s.v.Field = &field + + return s +} + +func (s *_sumAggregation) Format(format string) *_sumAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_sumAggregation) Missing(missing types.MissingVariant) *_sumAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_sumAggregation) Script(script types.ScriptVariant) *_sumAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_sumAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Sum = s.v + + return container +} + +func (s *_sumAggregation) SumAggregationCaster() *types.SumAggregation { + return s.v +} diff --git a/typedapi/esdsl/sumbucketaggregation.go b/typedapi/esdsl/sumbucketaggregation.go new file mode 100644 index 0000000000..2b41a7a493 --- /dev/null +++ b/typedapi/esdsl/sumbucketaggregation.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" +) + +type _sumBucketAggregation struct { + v *types.SumBucketAggregation +} + +// A sibling pipeline aggregation which calculates the sum of a specified metric +// across all buckets in a sibling aggregation. +func NewSumBucketAggregation() *_sumBucketAggregation { + + return &_sumBucketAggregation{v: types.NewSumBucketAggregation()} + +} + +// Path to the buckets that contain one set of values to correlate. +func (s *_sumBucketAggregation) BucketsPath(bucketspath types.BucketsPathVariant) *_sumBucketAggregation { + + s.v.BucketsPath = *bucketspath.BucketsPathCaster() + + return s +} + +// `DecimalFormat` pattern for the output value. +// If specified, the formatted value is returned in the aggregation’s +// `value_as_string` property. +func (s *_sumBucketAggregation) Format(format string) *_sumBucketAggregation { + + s.v.Format = &format + + return s +} + +// Policy to apply when gaps are found in the data. +func (s *_sumBucketAggregation) GapPolicy(gappolicy gappolicy.GapPolicy) *_sumBucketAggregation { + + s.v.GapPolicy = &gappolicy + return s +} + +func (s *_sumBucketAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.SumBucket = s.v + + return container +} + +func (s *_sumBucketAggregation) SumBucketAggregationCaster() *types.SumBucketAggregation { + return s.v +} diff --git a/typedapi/esdsl/swedishanalyzer.go b/typedapi/esdsl/swedishanalyzer.go new file mode 100644 index 0000000000..d33c1f2ceb --- /dev/null +++ b/typedapi/esdsl/swedishanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _swedishAnalyzer struct { + v *types.SwedishAnalyzer +} + +func NewSwedishAnalyzer() *_swedishAnalyzer { + + return &_swedishAnalyzer{v: types.NewSwedishAnalyzer()} + +} + +func (s *_swedishAnalyzer) StemExclusion(stemexclusions ...string) *_swedishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_swedishAnalyzer) Stopwords(stopwords ...string) *_swedishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_swedishAnalyzer) StopwordsPath(stopwordspath string) *_swedishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_swedishAnalyzer) SwedishAnalyzerCaster() *types.SwedishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/synccontainer.go b/typedapi/esdsl/synccontainer.go new file mode 100644 index 0000000000..7046df2b4e --- /dev/null +++ b/typedapi/esdsl/synccontainer.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _syncContainer struct {
+	v *types.SyncContainer
+}
+
+func NewSyncContainer() *_syncContainer {
+	return &_syncContainer{v: types.NewSyncContainer()}
+}
+
+// AdditionalSyncContainerProperty is a single-key dictionary.
+// It will replace the current value on each call.
+func (s *_syncContainer) AdditionalSyncContainerProperty(key string, value json.RawMessage) *_syncContainer {

+	tmp := make(map[string]json.RawMessage)
+
+	tmp[key] = value
+
+	s.v.AdditionalSyncContainerProperty = tmp
+	return s
+}
+
+// Specifies that the transform uses a time field to synchronize the source and
+// destination indices.
+func (s *_syncContainer) Time(time types.TimeSyncVariant) *_syncContainer { + + s.v.Time = time.TimeSyncCaster() + + return s +} + +func (s *_syncContainer) SyncContainerCaster() *types.SyncContainer { + return s.v +} diff --git a/typedapi/esdsl/syncrulesfeature.go b/typedapi/esdsl/syncrulesfeature.go new file mode 100644 index 0000000000..15739074c3 --- /dev/null +++ b/typedapi/esdsl/syncrulesfeature.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _syncRulesFeature struct { + v *types.SyncRulesFeature +} + +func NewSyncRulesFeature() *_syncRulesFeature { + + return &_syncRulesFeature{v: types.NewSyncRulesFeature()} + +} + +// Indicates whether advanced sync rules are enabled. +func (s *_syncRulesFeature) Advanced(advanced types.FeatureEnabledVariant) *_syncRulesFeature { + + s.v.Advanced = advanced.FeatureEnabledCaster() + + return s +} + +// Indicates whether basic sync rules are enabled. 
+func (s *_syncRulesFeature) Basic(basic types.FeatureEnabledVariant) *_syncRulesFeature { + + s.v.Basic = basic.FeatureEnabledCaster() + + return s +} + +func (s *_syncRulesFeature) SyncRulesFeatureCaster() *types.SyncRulesFeature { + return s.v +} diff --git a/typedapi/esdsl/synonymgraphtokenfilter.go b/typedapi/esdsl/synonymgraphtokenfilter.go new file mode 100644 index 0000000000..068417548a --- /dev/null +++ b/typedapi/esdsl/synonymgraphtokenfilter.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat" +) + +type _synonymGraphTokenFilter struct { + v *types.SynonymGraphTokenFilter +} + +func NewSynonymGraphTokenFilter() *_synonymGraphTokenFilter { + + return &_synonymGraphTokenFilter{v: types.NewSynonymGraphTokenFilter()} + +} + +func (s *_synonymGraphTokenFilter) Expand(expand bool) *_synonymGraphTokenFilter { + + s.v.Expand = &expand + + return s +} + +func (s *_synonymGraphTokenFilter) Format(format synonymformat.SynonymFormat) *_synonymGraphTokenFilter { + + s.v.Format = &format + return s +} + +func (s *_synonymGraphTokenFilter) Lenient(lenient bool) *_synonymGraphTokenFilter { + + s.v.Lenient = &lenient + + return s +} + +func (s *_synonymGraphTokenFilter) Synonyms(synonyms ...string) *_synonymGraphTokenFilter { + + for _, v := range synonyms { + + s.v.Synonyms = append(s.v.Synonyms, v) + + } + return s +} + +func (s *_synonymGraphTokenFilter) SynonymsPath(synonymspath string) *_synonymGraphTokenFilter { + + s.v.SynonymsPath = &synonymspath + + return s +} + +func (s *_synonymGraphTokenFilter) SynonymsSet(synonymsset string) *_synonymGraphTokenFilter { + + s.v.SynonymsSet = &synonymsset + + return s +} + +func (s *_synonymGraphTokenFilter) Tokenizer(tokenizer string) *_synonymGraphTokenFilter { + + s.v.Tokenizer = &tokenizer + + return s +} + +func (s *_synonymGraphTokenFilter) Updateable(updateable bool) *_synonymGraphTokenFilter { + + s.v.Updateable = &updateable + + return s +} + +func (s *_synonymGraphTokenFilter) Version(versionstring string) *_synonymGraphTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_synonymGraphTokenFilter) SynonymGraphTokenFilterCaster() *types.SynonymGraphTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/synonymrule.go 
b/typedapi/esdsl/synonymrule.go new file mode 100644 index 0000000000..e3266c669d --- /dev/null +++ b/typedapi/esdsl/synonymrule.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _synonymRule struct { + v *types.SynonymRule +} + +func NewSynonymRule() *_synonymRule { + + return &_synonymRule{v: types.NewSynonymRule()} + +} + +// The identifier for the synonym rule. +// If you do not specify a synonym rule ID when you create a rule, an identifier +// is created automatically by Elasticsearch. +func (s *_synonymRule) Id(id string) *_synonymRule { + + s.v.Id = &id + + return s +} + +// The synonyms that conform the synonym rule in Solr format. 
+func (s *_synonymRule) Synonyms(synonymstring string) *_synonymRule { + + s.v.Synonyms = synonymstring + + return s +} + +func (s *_synonymRule) SynonymRuleCaster() *types.SynonymRule { + return s.v +} diff --git a/typedapi/esdsl/synonymtokenfilter.go b/typedapi/esdsl/synonymtokenfilter.go new file mode 100644 index 0000000000..90a5bbe4b1 --- /dev/null +++ b/typedapi/esdsl/synonymtokenfilter.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat" +) + +type _synonymTokenFilter struct { + v *types.SynonymTokenFilter +} + +func NewSynonymTokenFilter() *_synonymTokenFilter { + + return &_synonymTokenFilter{v: types.NewSynonymTokenFilter()} + +} + +func (s *_synonymTokenFilter) Expand(expand bool) *_synonymTokenFilter { + + s.v.Expand = &expand + + return s +} + +func (s *_synonymTokenFilter) Format(format synonymformat.SynonymFormat) *_synonymTokenFilter { + + s.v.Format = &format + return s +} + +func (s *_synonymTokenFilter) Lenient(lenient bool) *_synonymTokenFilter { + + s.v.Lenient = &lenient + + return s +} + +func (s *_synonymTokenFilter) Synonyms(synonyms ...string) *_synonymTokenFilter { + + for _, v := range synonyms { + + s.v.Synonyms = append(s.v.Synonyms, v) + + } + return s +} + +func (s *_synonymTokenFilter) SynonymsPath(synonymspath string) *_synonymTokenFilter { + + s.v.SynonymsPath = &synonymspath + + return s +} + +func (s *_synonymTokenFilter) SynonymsSet(synonymsset string) *_synonymTokenFilter { + + s.v.SynonymsSet = &synonymsset + + return s +} + +func (s *_synonymTokenFilter) Tokenizer(tokenizer string) *_synonymTokenFilter { + + s.v.Tokenizer = &tokenizer + + return s +} + +func (s *_synonymTokenFilter) Updateable(updateable bool) *_synonymTokenFilter { + + s.v.Updateable = &updateable + + return s +} + +func (s *_synonymTokenFilter) Version(versionstring string) *_synonymTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_synonymTokenFilter) SynonymTokenFilterCaster() *types.SynonymTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/tablevaluescontainer.go b/typedapi/esdsl/tablevaluescontainer.go new file mode 100644 index 0000000000..b53d57de0b --- /dev/null +++ 
b/typedapi/esdsl/tablevaluescontainer.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b
+
+package esdsl
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
+)
+
+type _tableValuesContainer struct {
+	v *types.TableValuesContainer
+}
+
+func NewTableValuesContainer() *_tableValuesContainer {
+	return &_tableValuesContainer{v: types.NewTableValuesContainer()}
+}
+
+// AdditionalTableValuesContainerProperty is a single-key dictionary.
+// It will replace the current value on each call.
+func (s *_tableValuesContainer) AdditionalTableValuesContainerProperty(key string, value json.RawMessage) *_tableValuesContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalTableValuesContainerProperty = tmp + return s +} + +func (s *_tableValuesContainer) Float64(float64s ...[]types.Float64) *_tableValuesContainer { + + for _, v := range float64s { + + s.v.Float64 = append(s.v.Float64, v) + + } + return s +} + +func (s *_tableValuesContainer) Int(ints ...[]int) *_tableValuesContainer { + + for _, v := range ints { + + s.v.Int = append(s.v.Int, v) + + } + return s +} + +func (s *_tableValuesContainer) Int64(int64s ...[]int64) *_tableValuesContainer { + + for _, v := range int64s { + + s.v.Int64 = append(s.v.Int64, v) + + } + return s +} + +func (s *_tableValuesContainer) Keyword(keywords ...[]string) *_tableValuesContainer { + + for _, v := range keywords { + + s.v.Keyword = append(s.v.Keyword, v) + + } + return s +} + +func (s *_tableValuesContainer) TableValuesContainerCaster() *types.TableValuesContainer { + return s.v +} diff --git a/typedapi/esdsl/tablevaluesintegervalue.go b/typedapi/esdsl/tablevaluesintegervalue.go new file mode 100644 index 0000000000..7995af0fc6 --- /dev/null +++ b/typedapi/esdsl/tablevaluesintegervalue.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias. +type _tableValuesIntegerValue struct { + v types.TableValuesIntegerValue +} + +func NewTableValuesIntegerValue() *_tableValuesIntegerValue { + return &_tableValuesIntegerValue{v: []int{}} +} + +func (u *_tableValuesIntegerValue) TableValuesIntegerValueCaster() *types.TableValuesIntegerValue { + return &u.v +} diff --git a/typedapi/esdsl/tablevalueskeywordvalue.go b/typedapi/esdsl/tablevalueskeywordvalue.go new file mode 100644 index 0000000000..b58866dcc1 --- /dev/null +++ b/typedapi/esdsl/tablevalueskeywordvalue.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _tableValuesKeywordValue struct { + v types.TableValuesKeywordValue +} + +func NewTableValuesKeywordValue() *_tableValuesKeywordValue { + return &_tableValuesKeywordValue{v: nil} +} + +func (u *_tableValuesKeywordValue) Strings(strings ...string) *_tableValuesKeywordValue { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_tableValuesKeywordValue) TableValuesKeywordValueCaster() *types.TableValuesKeywordValue { + return &u.v +} diff --git a/typedapi/esdsl/tablevalueslongdouble.go b/typedapi/esdsl/tablevalueslongdouble.go new file mode 100644 index 0000000000..712f3b4bea --- /dev/null +++ b/typedapi/esdsl/tablevalueslongdouble.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias. +type _tableValuesLongDouble struct { + v types.TableValuesLongDouble +} + +func NewTableValuesLongDouble() *_tableValuesLongDouble { + return &_tableValuesLongDouble{v: []types.Float64{}} +} + +func (u *_tableValuesLongDouble) TableValuesLongDoubleCaster() *types.TableValuesLongDouble { + return &u.v +} diff --git a/typedapi/esdsl/tablevalueslongvalue.go b/typedapi/esdsl/tablevalueslongvalue.go new file mode 100644 index 0000000000..23df236cca --- /dev/null +++ b/typedapi/esdsl/tablevalueslongvalue.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides an API for a type alias.
+type _tableValuesLongValue struct { + v types.TableValuesLongValue +} + +func NewTableValuesLongValue() *_tableValuesLongValue { + return &_tableValuesLongValue{v: []int64{}} +} + +func (u *_tableValuesLongValue) TableValuesLongValueCaster() *types.TableValuesLongValue { + return &u.v +} diff --git a/typedapi/esdsl/targetmeanencodingpreprocessor.go b/typedapi/esdsl/targetmeanencodingpreprocessor.go new file mode 100644 index 0000000000..d586eefab4 --- /dev/null +++ b/typedapi/esdsl/targetmeanencodingpreprocessor.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _targetMeanEncodingPreprocessor struct { + v *types.TargetMeanEncodingPreprocessor +} + +func NewTargetMeanEncodingPreprocessor(defaultvalue types.Float64, featurename string, field string) *_targetMeanEncodingPreprocessor { + + tmp := &_targetMeanEncodingPreprocessor{v: types.NewTargetMeanEncodingPreprocessor()} + + tmp.DefaultValue(defaultvalue) + + tmp.FeatureName(featurename) + + tmp.Field(field) + + return tmp + +} + +func (s *_targetMeanEncodingPreprocessor) DefaultValue(defaultvalue types.Float64) *_targetMeanEncodingPreprocessor { + + s.v.DefaultValue = defaultvalue + + return s +} + +func (s *_targetMeanEncodingPreprocessor) FeatureName(featurename string) *_targetMeanEncodingPreprocessor { + + s.v.FeatureName = featurename + + return s +} + +func (s *_targetMeanEncodingPreprocessor) Field(field string) *_targetMeanEncodingPreprocessor { + + s.v.Field = field + + return s +} + +func (s *_targetMeanEncodingPreprocessor) TargetMap(targetmap map[string]types.Float64) *_targetMeanEncodingPreprocessor { + + s.v.TargetMap = targetmap + return s +} + +func (s *_targetMeanEncodingPreprocessor) AddTargetMap(key string, value types.Float64) *_targetMeanEncodingPreprocessor { + + var tmp map[string]types.Float64 + if s.v.TargetMap == nil { + s.v.TargetMap = make(map[string]types.Float64) + } else { + tmp = s.v.TargetMap + } + + tmp[key] = value + + s.v.TargetMap = tmp + return s +} + +func (s *_targetMeanEncodingPreprocessor) PreprocessorCaster() *types.Preprocessor { + container := types.NewPreprocessor() + + container.TargetMeanEncoding = s.v + + return container +} + +func (s *_targetMeanEncodingPreprocessor) TargetMeanEncodingPreprocessorCaster() *types.TargetMeanEncodingPreprocessor { + return s.v +} diff --git a/typedapi/esdsl/tdigest.go b/typedapi/esdsl/tdigest.go new 
file mode 100644 index 0000000000..9809ad7d26 --- /dev/null +++ b/typedapi/esdsl/tdigest.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _tDigest struct { + v *types.TDigest +} + +func NewTDigest() *_tDigest { + + return &_tDigest{v: types.NewTDigest()} + +} + +// Limits the maximum number of nodes used by the underlying TDigest algorithm +// to `20 * compression`, enabling control of memory usage and approximation +// error. +func (s *_tDigest) Compression(compression int) *_tDigest { + + s.v.Compression = &compression + + return s +} + +func (s *_tDigest) TDigestCaster() *types.TDigest { + return s.v +} diff --git a/typedapi/esdsl/templateconfig.go b/typedapi/esdsl/templateconfig.go new file mode 100644 index 0000000000..f24c69d5e0 --- /dev/null +++ b/typedapi/esdsl/templateconfig.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _templateConfig struct { + v *types.TemplateConfig +} + +func NewTemplateConfig() *_templateConfig { + + return &_templateConfig{v: types.NewTemplateConfig()} + +} + +// If `true`, returns detailed information about score calculation as part of +// each hit. +func (s *_templateConfig) Explain(explain bool) *_templateConfig { + + s.v.Explain = &explain + + return s +} + +// The ID of the search template to use. If no `source` is specified, +// this parameter is required. +func (s *_templateConfig) Id(id string) *_templateConfig { + + s.v.Id = &id + + return s +} + +// Key-value pairs used to replace Mustache variables in the template. +// The key is the variable name. +// The value is the variable value. 
+func (s *_templateConfig) Params(params map[string]json.RawMessage) *_templateConfig { + + s.v.Params = params + return s +} + +func (s *_templateConfig) AddParam(key string, value json.RawMessage) *_templateConfig { + + var tmp map[string]json.RawMessage + if s.v.Params == nil { + s.v.Params = make(map[string]json.RawMessage) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +// If `true`, the query execution is profiled. +func (s *_templateConfig) Profile(profile bool) *_templateConfig { + + s.v.Profile = &profile + + return s +} + +// An inline search template. Supports the same parameters as the search API's +// request body. It also supports Mustache variables. If no `id` is specified, +// this +// parameter is required. +func (s *_templateConfig) Source(source string) *_templateConfig { + + s.v.Source = &source + + return s +} + +func (s *_templateConfig) TemplateConfigCaster() *types.TemplateConfig { + return s.v +} diff --git a/typedapi/esdsl/terminateprocessor.go b/typedapi/esdsl/terminateprocessor.go new file mode 100644 index 0000000000..c69816edde --- /dev/null +++ b/typedapi/esdsl/terminateprocessor.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _terminateProcessor struct { + v *types.TerminateProcessor +} + +// Terminates the current ingest pipeline, causing no further processors to be +// run. +// This will normally be executed conditionally, using the `if` option. +func NewTerminateProcessor() *_terminateProcessor { + + return &_terminateProcessor{v: types.NewTerminateProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_terminateProcessor) Description(description string) *_terminateProcessor { + + s.v.Description = &description + + return s +} + +// Conditionally execute the processor. +func (s *_terminateProcessor) If(if_ types.ScriptVariant) *_terminateProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_terminateProcessor) IgnoreFailure(ignorefailure bool) *_terminateProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// Handle failures for the processor. +func (s *_terminateProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_terminateProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_terminateProcessor) Tag(tag string) *_terminateProcessor { + + s.v.Tag = &tag + + return s +} + +func (s *_terminateProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Terminate = s.v + + return container +} + +func (s *_terminateProcessor) TerminateProcessorCaster() *types.TerminateProcessor { + return s.v +} diff --git a/typedapi/esdsl/termquery.go b/typedapi/esdsl/termquery.go new file mode 100644 index 0000000000..190e0f45a1 --- /dev/null +++ b/typedapi/esdsl/termquery.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termQuery struct { + k string + v *types.TermQuery +} + +// Returns roles that contain an exact term in a provided field. +// To return a document, the query term must exactly match the queried field's +// value, including whitespace and capitalization. 
+func NewTermQuery(field string, value types.FieldValueVariant) *_termQuery { + tmp := &_termQuery{ + k: field, + v: types.NewTermQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_termQuery) Boost(boost float32) *_termQuery { + + s.v.Boost = &boost + + return s +} + +// Allows ASCII case insensitive matching of the value with the indexed field +// values when set to `true`. +// When `false`, the case sensitivity of matching depends on the underlying +// field’s mapping. +func (s *_termQuery) CaseInsensitive(caseinsensitive bool) *_termQuery { + + s.v.CaseInsensitive = &caseinsensitive + + return s +} + +func (s *_termQuery) QueryName_(queryname_ string) *_termQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Term you wish to find in the provided field. 
+func (s *_termQuery) Value(fieldvalue types.FieldValueVariant) *_termQuery { + + s.v.Value = *fieldvalue.FieldValueCaster() + + return s +} + +func (s *_termQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Term = map[string]types.TermQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Term = map[string]types.TermQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Term = map[string]types.TermQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Term = map[string]types.TermQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleTermQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleTermQuery() *_termQuery { + return &_termQuery{ + k: "", + v: types.NewTermQuery(), + } +} + +func (s *_termQuery) TermQueryCaster() *types.TermQuery { + return s.v.TermQueryCaster() +} diff --git a/typedapi/esdsl/termrangequery.go b/typedapi/esdsl/termrangequery.go new file mode 100644 index 0000000000..f39eccf73e --- /dev/null +++ b/typedapi/esdsl/termrangequery.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" +) + +type _termRangeQuery struct { + k string + v *types.TermRangeQuery +} + +// Returns roles that contain terms within a provided range. +func NewTermRangeQuery(key string) *_termRangeQuery { + return &_termRangeQuery{ + k: key, + v: types.NewTermRangeQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_termRangeQuery) Boost(boost float32) *_termRangeQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_termRangeQuery) From(from string) *_termRangeQuery { + + s.v.From = &from + + return s +} + +// Greater than. +func (s *_termRangeQuery) Gt(gt string) *_termRangeQuery { + + s.v.Gt = > + + return s +} + +// Greater than or equal to. +func (s *_termRangeQuery) Gte(gte string) *_termRangeQuery { + + s.v.Gte = >e + + return s +} + +// Less than. +func (s *_termRangeQuery) Lt(lt string) *_termRangeQuery { + + s.v.Lt = < + + return s +} + +// Less than or equal to. 
+func (s *_termRangeQuery) Lte(lte string) *_termRangeQuery { + + s.v.Lte = <e + + return s +} + +func (s *_termRangeQuery) QueryName_(queryname_ string) *_termRangeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates how the range query matches values for `range` fields. +func (s *_termRangeQuery) Relation(relation rangerelation.RangeRelation) *_termRangeQuery { + + s.v.Relation = &relation + return s +} + +func (s *_termRangeQuery) To(to string) *_termRangeQuery { + + s.v.To = &to + + return s +} + +func (s *_termRangeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termRangeQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termRangeQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_termRangeQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleTermRangeQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleTermRangeQuery() *_termRangeQuery { + return &_termRangeQuery{ + k: "", + v: types.NewTermRangeQuery(), + } +} + +func (s *_termRangeQuery) TermRangeQueryCaster() *types.TermRangeQuery { + return s.v.TermRangeQueryCaster() +} diff --git a/typedapi/esdsl/termsaggregation.go b/typedapi/esdsl/termsaggregation.go new file mode 100644 index 0000000000..bb4a71bb1f --- /dev/null +++ b/typedapi/esdsl/termsaggregation.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" +) + +type _termsAggregation struct { + v *types.TermsAggregation +} + +// A multi-bucket value source based aggregation where buckets are dynamically +// built - one per unique value. 
+func NewTermsAggregation() *_termsAggregation { + + return &_termsAggregation{v: types.NewTermsAggregation()} + +} + +// Determines how child aggregations should be calculated: breadth-first or +// depth-first. +func (s *_termsAggregation) CollectMode(collectmode termsaggregationcollectmode.TermsAggregationCollectMode) *_termsAggregation { + + s.v.CollectMode = &collectmode + return s +} + +// Values to exclude. +// Accepts regular expressions and partitions. +func (s *_termsAggregation) Exclude(termsexcludes ...string) *_termsAggregation { + + s.v.Exclude = termsexcludes + + return s +} + +// Determines whether the aggregation will use field values directly or global +// ordinals. +func (s *_termsAggregation) ExecutionHint(executionhint termsaggregationexecutionhint.TermsAggregationExecutionHint) *_termsAggregation { + + s.v.ExecutionHint = &executionhint + return s +} + +// The field from which to return terms. +func (s *_termsAggregation) Field(field string) *_termsAggregation { + + s.v.Field = &field + + return s +} + +func (s *_termsAggregation) Format(format string) *_termsAggregation { + + s.v.Format = &format + + return s +} + +// Values to include. +// Accepts regular expressions and partitions. +func (s *_termsAggregation) Include(termsinclude types.TermsIncludeVariant) *_termsAggregation { + + s.v.Include = *termsinclude.TermsIncludeCaster() + + return s +} + +// Only return values that are found in more than `min_doc_count` hits. +func (s *_termsAggregation) MinDocCount(mindoccount int) *_termsAggregation { + + s.v.MinDocCount = &mindoccount + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_termsAggregation) Missing(missing types.MissingVariant) *_termsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_termsAggregation) MissingBucket(missingbucket bool) *_termsAggregation { + + s.v.MissingBucket = &missingbucket + + return s +} + +func (s *_termsAggregation) MissingOrder(missingorder missingorder.MissingOrder) *_termsAggregation { + + s.v.MissingOrder = &missingorder + return s +} + +// Specifies the sort order of the buckets. +// Defaults to sorting by descending document count. +func (s *_termsAggregation) Order(aggregateorder types.AggregateOrderVariant) *_termsAggregation { + + s.v.Order = *aggregateorder.AggregateOrderCaster() + + return s +} + +func (s *_termsAggregation) Script(script types.ScriptVariant) *_termsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Regulates the certainty a shard has if the term should actually be added to +// the candidate list or not with respect to the `min_doc_count`. +// Terms will only be considered if their local shard frequency within the set +// is higher than the `shard_min_doc_count`. +func (s *_termsAggregation) ShardMinDocCount(shardmindoccount int64) *_termsAggregation { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// The number of candidate terms produced by each shard. +// By default, `shard_size` will be automatically estimated based on the number +// of shards and the `size` parameter. +func (s *_termsAggregation) ShardSize(shardsize int) *_termsAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +// Set to `true` to return the `doc_count_error_upper_bound`, which is an upper +// bound to the error on the `doc_count` returned by each shard. +func (s *_termsAggregation) ShowTermDocCountError(showtermdoccounterror bool) *_termsAggregation { + + s.v.ShowTermDocCountError = &showtermdoccounterror + + return s +} + +// The number of buckets returned out of the overall terms list. 
+func (s *_termsAggregation) Size(size int) *_termsAggregation { + + s.v.Size = &size + + return s +} + +// Coerced unmapped fields into the specified type. +func (s *_termsAggregation) ValueType(valuetype string) *_termsAggregation { + + s.v.ValueType = &valuetype + + return s +} + +func (s *_termsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.Terms = s.v + + return container +} + +func (s *_termsAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.Terms = s.v + + return container +} + +func (s *_termsAggregation) PivotGroupByContainerCaster() *types.PivotGroupByContainer { + container := types.NewPivotGroupByContainer() + + container.Terms = s.v + + return container +} + +func (s *_termsAggregation) TermsAggregationCaster() *types.TermsAggregation { + return s.v +} diff --git a/typedapi/esdsl/termsexclude.go b/typedapi/esdsl/termsexclude.go new file mode 100644 index 0000000000..74358ab3e4 --- /dev/null +++ b/typedapi/esdsl/termsexclude.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _termsExclude struct { + v types.TermsExclude +} + +func NewTermsExclude() *_termsExclude { + return &_termsExclude{v: nil} +} + +func (u *_termsExclude) Strings(strings ...string) *_termsExclude { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_termsExclude) TermsExcludeCaster() *types.TermsExclude { + return &u.v +} diff --git a/typedapi/esdsl/termsgrouping.go b/typedapi/esdsl/termsgrouping.go new file mode 100644 index 0000000000..a2d939a982 --- /dev/null +++ b/typedapi/esdsl/termsgrouping.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termsGrouping struct { + v *types.TermsGrouping +} + +func NewTermsGrouping() *_termsGrouping { + + return &_termsGrouping{v: types.NewTermsGrouping()} + +} + +// The set of fields that you wish to collect terms for. +// This array can contain fields that are both keyword and numerics. +// Order does not matter. +func (s *_termsGrouping) Fields(fields ...string) *_termsGrouping { + + s.v.Fields = fields + + return s +} + +func (s *_termsGrouping) TermsGroupingCaster() *types.TermsGrouping { + return s.v +} diff --git a/typedapi/esdsl/termsinclude.go b/typedapi/esdsl/termsinclude.go new file mode 100644 index 0000000000..33c8e86d70 --- /dev/null +++ b/typedapi/esdsl/termsinclude.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _termsInclude struct { + v types.TermsInclude +} + +func NewTermsInclude() *_termsInclude { + return &_termsInclude{v: nil} +} + +func (u *_termsInclude) String(string string) *_termsInclude { + + u.v = &string + + return u +} + +func (u *_termsInclude) Strings(strings ...string) *_termsInclude { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_termsInclude) TermsPartition(termspartition types.TermsPartitionVariant) *_termsInclude { + + u.v = &termspartition + + return u +} + +// Interface implementation for TermsPartition in TermsInclude union +func (u *_termsPartition) TermsIncludeCaster() *types.TermsInclude { + t := types.TermsInclude(u.v) + return &t +} + +func (u *_termsInclude) TermsIncludeCaster() *types.TermsInclude { + return &u.v +} diff --git a/typedapi/esdsl/termslookup.go b/typedapi/esdsl/termslookup.go new file mode 100644 index 0000000000..4624a357af --- /dev/null +++ b/typedapi/esdsl/termslookup.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termsLookup struct { + v *types.TermsLookup +} + +func NewTermsLookup() *_termsLookup { + + return &_termsLookup{v: types.NewTermsLookup()} + +} + +func (s *_termsLookup) Id(id string) *_termsLookup { + + s.v.Id = id + + return s +} + +func (s *_termsLookup) Index(indexname string) *_termsLookup { + + s.v.Index = indexname + + return s +} + +func (s *_termsLookup) Path(field string) *_termsLookup { + + s.v.Path = field + + return s +} + +func (s *_termsLookup) Routing(routing string) *_termsLookup { + + s.v.Routing = &routing + + return s +} + +func (s *_termsLookup) TermsLookupCaster() *types.TermsLookup { + return s.v +} diff --git a/typedapi/esdsl/termspartition.go b/typedapi/esdsl/termspartition.go new file mode 100644 index 0000000000..0d95214ff9 --- /dev/null +++ b/typedapi/esdsl/termspartition.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termsPartition struct { + v *types.TermsPartition +} + +func NewTermsPartition(numpartitions int64, partition int64) *_termsPartition { + + tmp := &_termsPartition{v: types.NewTermsPartition()} + + tmp.NumPartitions(numpartitions) + + tmp.Partition(partition) + + return tmp + +} + +// The number of partitions. +func (s *_termsPartition) NumPartitions(numpartitions int64) *_termsPartition { + + s.v.NumPartitions = numpartitions + + return s +} + +// The partition number for this request. +func (s *_termsPartition) Partition(partition int64) *_termsPartition { + + s.v.Partition = partition + + return s +} + +func (s *_termsPartition) TermsPartitionCaster() *types.TermsPartition { + return s.v +} diff --git a/typedapi/esdsl/termsquery.go b/typedapi/esdsl/termsquery.go new file mode 100644 index 0000000000..4caf680f2e --- /dev/null +++ b/typedapi/esdsl/termsquery.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termsQuery struct { + v *types.TermsQuery +} + +// Returns users that contain one or more exact terms in a provided field. +// To return a document, one or more terms must exactly match a field value, +// including whitespace and capitalization. +func NewTermsQuery() *_termsQuery { + + return &_termsQuery{v: types.NewTermsQuery()} + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_termsQuery) Boost(boost float32) *_termsQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_termsQuery) QueryName_(queryname_ string) *_termsQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_termsQuery) TermsQuery(termsquery map[string]types.TermsQueryField) *_termsQuery { + + s.v.TermsQuery = termsquery + return s +} + +func (s *_termsQuery) AddTermsQuery(key string, value types.TermsQueryFieldVariant) *_termsQuery { + + var tmp map[string]types.TermsQueryField + if s.v.TermsQuery == nil { + s.v.TermsQuery = make(map[string]types.TermsQueryField) + } else { + tmp = s.v.TermsQuery + } + + tmp[key] = *value.TermsQueryFieldCaster() + + s.v.TermsQuery = tmp + return s +} + +func (s *_termsQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Terms = s.v + + return container +} + +func (s *_termsQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + + container.Terms = 
s.v + + return container +} + +func (s *_termsQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + + container.Terms = s.v + + return container +} + +func (s *_termsQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + + container.Terms = s.v + + return container +} + +func (s *_termsQuery) TermsQueryCaster() *types.TermsQuery { + return s.v +} diff --git a/typedapi/esdsl/termsqueryfield.go b/typedapi/esdsl/termsqueryfield.go new file mode 100644 index 0000000000..b00e74b724 --- /dev/null +++ b/typedapi/esdsl/termsqueryfield.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _termsQueryField struct { + v types.TermsQueryField +} + +func NewTermsQueryField() *_termsQueryField { + return &_termsQueryField{v: nil} +} + +func (u *_termsQueryField) FieldValues(fieldvalues ...types.FieldValueVariant) *_termsQueryField { + + u.v = make([]types.FieldValue, len(fieldvalues)) + for i, v := range fieldvalues { + u.v.([]types.FieldValue)[i] = *v.FieldValueCaster() + } + + return u +} + +func (u *_termsQueryField) TermsLookup(termslookup types.TermsLookupVariant) *_termsQueryField { + + u.v = &termslookup + + return u +} + +// Interface implementation for TermsLookup in TermsQueryField union +func (u *_termsLookup) TermsQueryFieldCaster() *types.TermsQueryField { + t := types.TermsQueryField(u.v) + return &t +} + +func (u *_termsQueryField) TermsQueryFieldCaster() *types.TermsQueryField { + return &u.v +} diff --git a/typedapi/esdsl/termssetquery.go b/typedapi/esdsl/termssetquery.go new file mode 100644 index 0000000000..ea8f0d7570 --- /dev/null +++ b/typedapi/esdsl/termssetquery.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termsSetQuery struct { + k string + v *types.TermsSetQuery +} + +// Returns documents that contain a minimum number of exact terms in a provided +// field. +// To return a document, a required number of terms must exactly match the field +// values, including whitespace and capitalization. +func NewTermsSetQuery(key string) *_termsSetQuery { + return &_termsSetQuery{ + k: key, + v: types.NewTermsSetQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_termsSetQuery) Boost(boost float32) *_termsSetQuery { + + s.v.Boost = &boost + + return s +} + +// Specification describing number of matching terms required to return a +// document. +func (s *_termsSetQuery) MinimumShouldMatch(minimumshouldmatch types.MinimumShouldMatchVariant) *_termsSetQuery { + + s.v.MinimumShouldMatch = *minimumshouldmatch.MinimumShouldMatchCaster() + + return s +} + +// Numeric field containing the number of matching terms required to return a +// document. +func (s *_termsSetQuery) MinimumShouldMatchField(field string) *_termsSetQuery { + + s.v.MinimumShouldMatchField = &field + + return s +} + +// Custom script containing the number of matching terms required to return a +// document. 
+func (s *_termsSetQuery) MinimumShouldMatchScript(minimumshouldmatchscript types.ScriptVariant) *_termsSetQuery { + + s.v.MinimumShouldMatchScript = minimumshouldmatchscript.ScriptCaster() + + return s +} + +func (s *_termsSetQuery) QueryName_(queryname_ string) *_termsSetQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Array of terms you wish to find in the provided field. +func (s *_termsSetQuery) Terms(terms ...types.FieldValueVariant) *_termsSetQuery { + + for _, v := range terms { + + s.v.Terms = append(s.v.Terms, *v.FieldValueCaster()) + + } + return s +} + +func (s *_termsSetQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.TermsSet = map[string]types.TermsSetQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleTermsSetQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleTermsSetQuery() *_termsSetQuery { + return &_termsSetQuery{ + k: "", + v: types.NewTermsSetQuery(), + } +} + +func (s *_termsSetQuery) TermsSetQueryCaster() *types.TermsSetQuery { + return s.v.TermsSetQueryCaster() +} diff --git a/typedapi/esdsl/termsuggester.go b/typedapi/esdsl/termsuggester.go new file mode 100644 index 0000000000..f20b841d4f --- /dev/null +++ b/typedapi/esdsl/termsuggester.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort" +) + +type _termSuggester struct { + v *types.TermSuggester +} + +// Suggests terms based on edit distance. +func NewTermSuggester() *_termSuggester { + + return &_termSuggester{v: types.NewTermSuggester()} + +} + +// The analyzer to analyze the suggest text with. +// Defaults to the search analyzer of the suggest field. +func (s *_termSuggester) Analyzer(analyzer string) *_termSuggester { + + s.v.Analyzer = &analyzer + + return s +} + +// The field to fetch the candidate suggestions from. +// Needs to be set globally or per suggestion. +func (s *_termSuggester) Field(field string) *_termSuggester { + + s.v.Field = field + + return s +} + +func (s *_termSuggester) LowercaseTerms(lowercaseterms bool) *_termSuggester { + + s.v.LowercaseTerms = &lowercaseterms + + return s +} + +// The maximum edit distance candidate suggestions can have in order to be +// considered as a suggestion. +// Can only be `1` or `2`. 
+func (s *_termSuggester) MaxEdits(maxedits int) *_termSuggester { + + s.v.MaxEdits = &maxedits + + return s +} + +// A factor that is used to multiply with the shard_size in order to inspect +// more candidate spelling corrections on the shard level. +// Can improve accuracy at the cost of performance. +func (s *_termSuggester) MaxInspections(maxinspections int) *_termSuggester { + + s.v.MaxInspections = &maxinspections + + return s +} + +// The maximum threshold in number of documents in which a suggest text token +// can exist in order to be included. +// Can be a relative percentage number (for example `0.4`) or an absolute number +// to represent document frequencies. +// If a value higher than 1 is specified, then fractional can not be specified. +func (s *_termSuggester) MaxTermFreq(maxtermfreq float32) *_termSuggester { + + s.v.MaxTermFreq = &maxtermfreq + + return s +} + +// The minimal threshold in number of documents a suggestion should appear in. +// This can improve quality by only suggesting high frequency terms. +// Can be specified as an absolute number or as a relative percentage of number +// of documents. +// If a value higher than 1 is specified, then the number cannot be fractional. +func (s *_termSuggester) MinDocFreq(mindocfreq float32) *_termSuggester { + + s.v.MinDocFreq = &mindocfreq + + return s +} + +// The minimum length a suggest text term must have in order to be included. +func (s *_termSuggester) MinWordLength(minwordlength int) *_termSuggester { + + s.v.MinWordLength = &minwordlength + + return s +} + +// The number of minimal prefix characters that must match in order be a +// candidate for suggestions. +// Increasing this number improves spellcheck performance. +func (s *_termSuggester) PrefixLength(prefixlength int) *_termSuggester { + + s.v.PrefixLength = &prefixlength + + return s +} + +// Sets the maximum number of suggestions to be retrieved from each individual +// shard. 
+func (s *_termSuggester) ShardSize(shardsize int) *_termSuggester { + + s.v.ShardSize = &shardsize + + return s +} + +// The maximum corrections to be returned per suggest text token. +func (s *_termSuggester) Size(size int) *_termSuggester { + + s.v.Size = &size + + return s +} + +// Defines how suggestions should be sorted per suggest text term. +func (s *_termSuggester) Sort(sort suggestsort.SuggestSort) *_termSuggester { + + s.v.Sort = &sort + return s +} + +// The string distance implementation to use for comparing how similar suggested +// terms are. +func (s *_termSuggester) StringDistance(stringdistance stringdistance.StringDistance) *_termSuggester { + + s.v.StringDistance = &stringdistance + return s +} + +// Controls what suggestions are included or controls for what suggest text +// terms, suggestions should be suggested. +func (s *_termSuggester) SuggestMode(suggestmode suggestmode.SuggestMode) *_termSuggester { + + s.v.SuggestMode = &suggestmode + return s +} + +// The suggest text. +// Needs to be set globally or per suggestion. +func (s *_termSuggester) Text(text string) *_termSuggester { + + s.v.Text = &text + + return s +} + +func (s *_termSuggester) FieldSuggesterCaster() *types.FieldSuggester { + container := types.NewFieldSuggester() + + container.Term = s.v + + return container +} + +func (s *_termSuggester) TermSuggesterCaster() *types.TermSuggester { + return s.v +} diff --git a/typedapi/esdsl/termvectorsfilter.go b/typedapi/esdsl/termvectorsfilter.go new file mode 100644 index 0000000000..34c1a3e253 --- /dev/null +++ b/typedapi/esdsl/termvectorsfilter.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _termVectorsFilter struct { + v *types.TermVectorsFilter +} + +func NewTermVectorsFilter() *_termVectorsFilter { + + return &_termVectorsFilter{v: types.NewTermVectorsFilter()} + +} + +// Ignore words which occur in more than this many docs. +// Defaults to unbounded. +func (s *_termVectorsFilter) MaxDocFreq(maxdocfreq int) *_termVectorsFilter { + + s.v.MaxDocFreq = &maxdocfreq + + return s +} + +// The maximum number of terms that must be returned per field. +func (s *_termVectorsFilter) MaxNumTerms(maxnumterms int) *_termVectorsFilter { + + s.v.MaxNumTerms = &maxnumterms + + return s +} + +// Ignore words with more than this frequency in the source doc. +// It defaults to unbounded. +func (s *_termVectorsFilter) MaxTermFreq(maxtermfreq int) *_termVectorsFilter { + + s.v.MaxTermFreq = &maxtermfreq + + return s +} + +// The maximum word length above which words will be ignored. +// Defaults to unbounded. +func (s *_termVectorsFilter) MaxWordLength(maxwordlength int) *_termVectorsFilter { + + s.v.MaxWordLength = &maxwordlength + + return s +} + +// Ignore terms which do not occur in at least this many docs. 
+func (s *_termVectorsFilter) MinDocFreq(mindocfreq int) *_termVectorsFilter { + + s.v.MinDocFreq = &mindocfreq + + return s +} + +// Ignore words with less than this frequency in the source doc. +func (s *_termVectorsFilter) MinTermFreq(mintermfreq int) *_termVectorsFilter { + + s.v.MinTermFreq = &mintermfreq + + return s +} + +// The minimum word length below which words will be ignored. +func (s *_termVectorsFilter) MinWordLength(minwordlength int) *_termVectorsFilter { + + s.v.MinWordLength = &minwordlength + + return s +} + +func (s *_termVectorsFilter) TermVectorsFilterCaster() *types.TermVectorsFilter { + return s.v +} diff --git a/typedapi/esdsl/testpopulation.go b/typedapi/esdsl/testpopulation.go new file mode 100644 index 0000000000..67d9173d5f --- /dev/null +++ b/typedapi/esdsl/testpopulation.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _testPopulation struct { + v *types.TestPopulation +} + +func NewTestPopulation() *_testPopulation { + + return &_testPopulation{v: types.NewTestPopulation()} + +} + +// The field to aggregate. +func (s *_testPopulation) Field(field string) *_testPopulation { + + s.v.Field = field + + return s +} + +// A filter used to define a set of records to run unpaired t-test on. +func (s *_testPopulation) Filter(filter types.QueryVariant) *_testPopulation { + + s.v.Filter = filter.QueryCaster() + + return s +} + +func (s *_testPopulation) Script(script types.ScriptVariant) *_testPopulation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_testPopulation) TestPopulationCaster() *types.TestPopulation { + return s.v +} diff --git a/typedapi/esdsl/textclassificationinferenceoptions.go b/typedapi/esdsl/textclassificationinferenceoptions.go new file mode 100644 index 0000000000..e2bdb393f1 --- /dev/null +++ b/typedapi/esdsl/textclassificationinferenceoptions.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textClassificationInferenceOptions struct { + v *types.TextClassificationInferenceOptions +} + +// Text classification configuration for inference. +func NewTextClassificationInferenceOptions() *_textClassificationInferenceOptions { + + return &_textClassificationInferenceOptions{v: types.NewTextClassificationInferenceOptions()} + +} + +// Classification labels to apply other than the stored labels. Must have the +// same deminsions as the default configured labels +func (s *_textClassificationInferenceOptions) ClassificationLabels(classificationlabels ...string) *_textClassificationInferenceOptions { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_textClassificationInferenceOptions) NumTopClasses(numtopclasses int) *_textClassificationInferenceOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_textClassificationInferenceOptions) ResultsField(resultsfield string) *_textClassificationInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options +func (s *_textClassificationInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_textClassificationInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_textClassificationInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.TextClassification = s.v + + return container +} + +func (s *_textClassificationInferenceOptions) TextClassificationInferenceOptionsCaster() *types.TextClassificationInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/textclassificationinferenceupdateoptions.go b/typedapi/esdsl/textclassificationinferenceupdateoptions.go new file mode 100644 index 0000000000..0b2d8684ab --- /dev/null +++ b/typedapi/esdsl/textclassificationinferenceupdateoptions.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textClassificationInferenceUpdateOptions struct { + v *types.TextClassificationInferenceUpdateOptions +} + +// Text classification configuration for inference. +func NewTextClassificationInferenceUpdateOptions() *_textClassificationInferenceUpdateOptions { + + return &_textClassificationInferenceUpdateOptions{v: types.NewTextClassificationInferenceUpdateOptions()} + +} + +// Classification labels to apply other than the stored labels. Must have the +// same deminsions as the default configured labels +func (s *_textClassificationInferenceUpdateOptions) ClassificationLabels(classificationlabels ...string) *_textClassificationInferenceUpdateOptions { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +// Specifies the number of top class predictions to return. Defaults to 0. +func (s *_textClassificationInferenceUpdateOptions) NumTopClasses(numtopclasses int) *_textClassificationInferenceUpdateOptions { + + s.v.NumTopClasses = &numtopclasses + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_textClassificationInferenceUpdateOptions) ResultsField(resultsfield string) *_textClassificationInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_textClassificationInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_textClassificationInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_textClassificationInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.TextClassification = s.v + + return container +} + +func (s *_textClassificationInferenceUpdateOptions) TextClassificationInferenceUpdateOptionsCaster() *types.TextClassificationInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/textembedding.go b/typedapi/esdsl/textembedding.go new file mode 100644 index 0000000000..1714566edb --- /dev/null +++ b/typedapi/esdsl/textembedding.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textEmbedding struct { + v *types.TextEmbedding +} + +func NewTextEmbedding(modelid string, modeltext string) *_textEmbedding { + + tmp := &_textEmbedding{v: types.NewTextEmbedding()} + + tmp.ModelId(modelid) + + tmp.ModelText(modeltext) + + return tmp + +} + +func (s *_textEmbedding) ModelId(modelid string) *_textEmbedding { + + s.v.ModelId = modelid + + return s +} + +func (s *_textEmbedding) ModelText(modeltext string) *_textEmbedding { + + s.v.ModelText = modeltext + + return s +} + +func (s *_textEmbedding) QueryVectorBuilderCaster() *types.QueryVectorBuilder { + container := types.NewQueryVectorBuilder() + + container.TextEmbedding = s.v + + return container +} + +func (s *_textEmbedding) TextEmbeddingCaster() *types.TextEmbedding { + return s.v +} diff --git a/typedapi/esdsl/textembeddinginferenceoptions.go b/typedapi/esdsl/textembeddinginferenceoptions.go new file mode 100644 index 0000000000..e0acf8bec5 --- /dev/null +++ b/typedapi/esdsl/textembeddinginferenceoptions.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textEmbeddingInferenceOptions struct { + v *types.TextEmbeddingInferenceOptions +} + +// Text embedding configuration for inference. +func NewTextEmbeddingInferenceOptions(vocabulary types.VocabularyVariant) *_textEmbeddingInferenceOptions { + + tmp := &_textEmbeddingInferenceOptions{v: types.NewTextEmbeddingInferenceOptions()} + + tmp.Vocabulary(vocabulary) + + return tmp + +} + +// The number of dimensions in the embedding output +func (s *_textEmbeddingInferenceOptions) EmbeddingSize(embeddingsize int) *_textEmbeddingInferenceOptions { + + s.v.EmbeddingSize = &embeddingsize + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_textEmbeddingInferenceOptions) ResultsField(resultsfield string) *_textEmbeddingInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options +func (s *_textEmbeddingInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_textEmbeddingInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_textEmbeddingInferenceOptions) Vocabulary(vocabulary types.VocabularyVariant) *_textEmbeddingInferenceOptions { + + s.v.Vocabulary = *vocabulary.VocabularyCaster() + + return s +} + +func (s *_textEmbeddingInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.TextEmbedding = s.v + + return container +} + +func (s *_textEmbeddingInferenceOptions) TextEmbeddingInferenceOptionsCaster() *types.TextEmbeddingInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/textembeddinginferenceupdateoptions.go b/typedapi/esdsl/textembeddinginferenceupdateoptions.go new file mode 100644 index 0000000000..dec9a2832f --- /dev/null +++ b/typedapi/esdsl/textembeddinginferenceupdateoptions.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textEmbeddingInferenceUpdateOptions struct { + v *types.TextEmbeddingInferenceUpdateOptions +} + +// Text embedding configuration for inference. +func NewTextEmbeddingInferenceUpdateOptions() *_textEmbeddingInferenceUpdateOptions { + + return &_textEmbeddingInferenceUpdateOptions{v: types.NewTextEmbeddingInferenceUpdateOptions()} + +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. +func (s *_textEmbeddingInferenceUpdateOptions) ResultsField(resultsfield string) *_textEmbeddingInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +func (s *_textEmbeddingInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_textEmbeddingInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_textEmbeddingInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.TextEmbedding = s.v + + return container +} + +func (s *_textEmbeddingInferenceUpdateOptions) TextEmbeddingInferenceUpdateOptionsCaster() *types.TextEmbeddingInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/textexpansioninferenceoptions.go b/typedapi/esdsl/textexpansioninferenceoptions.go new file mode 100644 index 0000000000..0059bb50b0 --- /dev/null +++ b/typedapi/esdsl/textexpansioninferenceoptions.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textExpansionInferenceOptions struct { + v *types.TextExpansionInferenceOptions +} + +// Text expansion configuration for inference. +func NewTextExpansionInferenceOptions(vocabulary types.VocabularyVariant) *_textExpansionInferenceOptions { + + tmp := &_textExpansionInferenceOptions{v: types.NewTextExpansionInferenceOptions()} + + tmp.Vocabulary(vocabulary) + + return tmp + +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_textExpansionInferenceOptions) ResultsField(resultsfield string) *_textExpansionInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options +func (s *_textExpansionInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_textExpansionInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_textExpansionInferenceOptions) Vocabulary(vocabulary types.VocabularyVariant) *_textExpansionInferenceOptions { + + s.v.Vocabulary = *vocabulary.VocabularyCaster() + + return s +} + +func (s *_textExpansionInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.TextExpansion = s.v + + return container +} + +func (s *_textExpansionInferenceOptions) TextExpansionInferenceOptionsCaster() *types.TextExpansionInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/textexpansioninferenceupdateoptions.go b/typedapi/esdsl/textexpansioninferenceupdateoptions.go new file mode 100644 index 0000000000..7edc369e05 --- /dev/null +++ b/typedapi/esdsl/textexpansioninferenceupdateoptions.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textExpansionInferenceUpdateOptions struct { + v *types.TextExpansionInferenceUpdateOptions +} + +// Text expansion configuration for inference. +func NewTextExpansionInferenceUpdateOptions() *_textExpansionInferenceUpdateOptions { + + return &_textExpansionInferenceUpdateOptions{v: types.NewTextExpansionInferenceUpdateOptions()} + +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. +func (s *_textExpansionInferenceUpdateOptions) ResultsField(resultsfield string) *_textExpansionInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +func (s *_textExpansionInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_textExpansionInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_textExpansionInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.TextExpansion = s.v + + return container +} + +func (s *_textExpansionInferenceUpdateOptions) TextExpansionInferenceUpdateOptionsCaster() *types.TextExpansionInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esdsl/textexpansionquery.go b/typedapi/esdsl/textexpansionquery.go new file mode 100644 index 0000000000..3ec275598b --- /dev/null +++ b/typedapi/esdsl/textexpansionquery.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textExpansionQuery struct { + k string + v *types.TextExpansionQuery +} + +// Uses a natural language processing model to convert the query text into a +// list of token-weight pairs which are then used in a query against a sparse +// vector or rank features field. +func NewTextExpansionQuery(key string) *_textExpansionQuery { + return &_textExpansionQuery{ + k: key, + v: types.NewTextExpansionQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_textExpansionQuery) Boost(boost float32) *_textExpansionQuery { + + s.v.Boost = &boost + + return s +} + +// The text expansion NLP model to use +func (s *_textExpansionQuery) ModelId(modelid string) *_textExpansionQuery { + + s.v.ModelId = modelid + + return s +} + +// The query text +func (s *_textExpansionQuery) ModelText(modeltext string) *_textExpansionQuery { + + s.v.ModelText = modeltext + + return s +} + +// Token pruning configurations +func (s *_textExpansionQuery) PruningConfig(pruningconfig types.TokenPruningConfigVariant) *_textExpansionQuery { + + s.v.PruningConfig = pruningconfig.TokenPruningConfigCaster() + + return s +} + +func (s *_textExpansionQuery) QueryName_(queryname_ string) *_textExpansionQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_textExpansionQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.TextExpansion = map[string]types.TextExpansionQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleTextExpansionQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleTextExpansionQuery() *_textExpansionQuery { + return &_textExpansionQuery{ + k: "", + v: types.NewTextExpansionQuery(), + } +} + +func (s *_textExpansionQuery) TextExpansionQueryCaster() *types.TextExpansionQuery { + return s.v.TextExpansionQueryCaster() +} diff --git a/typedapi/esdsl/textindexprefixes.go b/typedapi/esdsl/textindexprefixes.go new file mode 100644 index 0000000000..947b9abd6e --- /dev/null +++ b/typedapi/esdsl/textindexprefixes.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textIndexPrefixes struct { + v *types.TextIndexPrefixes +} + +func NewTextIndexPrefixes(maxchars int, minchars int) *_textIndexPrefixes { + + tmp := &_textIndexPrefixes{v: types.NewTextIndexPrefixes()} + + tmp.MaxChars(maxchars) + + tmp.MinChars(minchars) + + return tmp + +} + +func (s *_textIndexPrefixes) MaxChars(maxchars int) *_textIndexPrefixes { + + s.v.MaxChars = maxchars + + return s +} + +func (s *_textIndexPrefixes) MinChars(minchars int) *_textIndexPrefixes { + + s.v.MinChars = minchars + + return s +} + +func (s *_textIndexPrefixes) TextIndexPrefixesCaster() *types.TextIndexPrefixes { + return s.v +} diff --git a/typedapi/esdsl/textproperty.go b/typedapi/esdsl/textproperty.go new file mode 100644 index 0000000000..2695611359 --- /dev/null +++ b/typedapi/esdsl/textproperty.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" +) + +type _textProperty struct { + v *types.TextProperty +} + +func NewTextProperty() *_textProperty { + + return &_textProperty{v: types.NewTextProperty()} + +} + +func (s *_textProperty) Analyzer(analyzer string) *_textProperty { + + s.v.Analyzer = &analyzer + + return s +} + +func (s *_textProperty) Boost(boost types.Float64) *_textProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_textProperty) CopyTo(fields ...string) *_textProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_textProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_textProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_textProperty) EagerGlobalOrdinals(eagerglobalordinals bool) *_textProperty { + + s.v.EagerGlobalOrdinals = &eagerglobalordinals + + return s +} + +func (s *_textProperty) 
Fielddata(fielddata bool) *_textProperty { + + s.v.Fielddata = &fielddata + + return s +} + +func (s *_textProperty) FielddataFrequencyFilter(fielddatafrequencyfilter types.FielddataFrequencyFilterVariant) *_textProperty { + + s.v.FielddataFrequencyFilter = fielddatafrequencyfilter.FielddataFrequencyFilterCaster() + + return s +} + +func (s *_textProperty) Fields(fields map[string]types.Property) *_textProperty { + + s.v.Fields = fields + return s +} + +func (s *_textProperty) AddField(key string, value types.PropertyVariant) *_textProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_textProperty) IgnoreAbove(ignoreabove int) *_textProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_textProperty) Index(index bool) *_textProperty { + + s.v.Index = &index + + return s +} + +func (s *_textProperty) IndexOptions(indexoptions indexoptions.IndexOptions) *_textProperty { + + s.v.IndexOptions = &indexoptions + return s +} + +func (s *_textProperty) IndexPhrases(indexphrases bool) *_textProperty { + + s.v.IndexPhrases = &indexphrases + + return s +} + +func (s *_textProperty) IndexPrefixes(indexprefixes types.TextIndexPrefixesVariant) *_textProperty { + + s.v.IndexPrefixes = indexprefixes.TextIndexPrefixesCaster() + + return s +} + +// Metadata about the field. 
+func (s *_textProperty) Meta(meta map[string]string) *_textProperty { + + s.v.Meta = meta + return s +} + +func (s *_textProperty) AddMeta(key string, value string) *_textProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_textProperty) Norms(norms bool) *_textProperty { + + s.v.Norms = &norms + + return s +} + +func (s *_textProperty) PositionIncrementGap(positionincrementgap int) *_textProperty { + + s.v.PositionIncrementGap = &positionincrementgap + + return s +} + +func (s *_textProperty) Properties(properties map[string]types.Property) *_textProperty { + + s.v.Properties = properties + return s +} + +func (s *_textProperty) AddProperty(key string, value types.PropertyVariant) *_textProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_textProperty) SearchAnalyzer(searchanalyzer string) *_textProperty { + + s.v.SearchAnalyzer = &searchanalyzer + + return s +} + +func (s *_textProperty) SearchQuoteAnalyzer(searchquoteanalyzer string) *_textProperty { + + s.v.SearchQuoteAnalyzer = &searchquoteanalyzer + + return s +} + +func (s *_textProperty) Similarity(similarity string) *_textProperty { + + s.v.Similarity = &similarity + + return s +} + +func (s *_textProperty) Store(store bool) *_textProperty { + + s.v.Store = &store + + return s +} + +func (s *_textProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_textProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_textProperty) TermVector(termvector termvectoroption.TermVectorOption) *_textProperty { + + s.v.TermVector = &termvector + return s +} + +func (s *_textProperty) 
DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_textProperty) TextPropertyCaster() *types.TextProperty { + return s.v +} diff --git a/typedapi/esdsl/textsimilarityreranker.go b/typedapi/esdsl/textsimilarityreranker.go new file mode 100644 index 0000000000..b8e4831c1f --- /dev/null +++ b/typedapi/esdsl/textsimilarityreranker.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _textSimilarityReranker struct { + v *types.TextSimilarityReranker +} + +// A retriever that reranks the top documents based on a reranking model using +// the InferenceAPI +func NewTextSimilarityReranker(retriever types.RetrieverContainerVariant) *_textSimilarityReranker { + + tmp := &_textSimilarityReranker{v: types.NewTextSimilarityReranker()} + + tmp.Retriever(retriever) + + return tmp + +} + +// The document field to be used for text similarity comparisons. 
This field +// should contain the text that will be evaluated against the inference_text +func (s *_textSimilarityReranker) Field(field string) *_textSimilarityReranker { + + s.v.Field = &field + + return s +} + +// Query to filter the documents that can match. +func (s *_textSimilarityReranker) Filter(filters ...types.QueryVariant) *_textSimilarityReranker { + + s.v.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + s.v.Filter[i] = *v.QueryCaster() + } + + return s +} + +// Unique identifier of the inference endpoint created using the inference API. +func (s *_textSimilarityReranker) InferenceId(inferenceid string) *_textSimilarityReranker { + + s.v.InferenceId = &inferenceid + + return s +} + +// The text snippet used as the basis for similarity comparison +func (s *_textSimilarityReranker) InferenceText(inferencetext string) *_textSimilarityReranker { + + s.v.InferenceText = &inferencetext + + return s +} + +// Minimum _score for matching documents. Documents with a lower _score are not +// included in the top documents. +func (s *_textSimilarityReranker) MinScore(minscore float32) *_textSimilarityReranker { + + s.v.MinScore = &minscore + + return s +} + +// This value determines how many documents we will consider from the nested +// retriever. +func (s *_textSimilarityReranker) RankWindowSize(rankwindowsize int) *_textSimilarityReranker { + + s.v.RankWindowSize = &rankwindowsize + + return s +} + +// The nested retriever which will produce the first-level results, that will +// later be used for reranking. 
+func (s *_textSimilarityReranker) Retriever(retriever types.RetrieverContainerVariant) *_textSimilarityReranker { + + s.v.Retriever = *retriever.RetrieverContainerCaster() + + return s +} + +func (s *_textSimilarityReranker) RetrieverContainerCaster() *types.RetrieverContainer { + container := types.NewRetrieverContainer() + + container.TextSimilarityReranker = s.v + + return container +} + +func (s *_textSimilarityReranker) TextSimilarityRerankerCaster() *types.TextSimilarityReranker { + return s.v +} diff --git a/typedapi/esdsl/texttoanalyze.go b/typedapi/esdsl/texttoanalyze.go new file mode 100644 index 0000000000..8c66f6b509 --- /dev/null +++ b/typedapi/esdsl/texttoanalyze.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _textToAnalyze struct { + v types.TextToAnalyze +} + +func NewTextToAnalyze() *_textToAnalyze { + return &_textToAnalyze{v: nil} +} + +func (u *_textToAnalyze) Strings(strings ...string) *_textToAnalyze { + + u.v = make([]string, len(strings)) + u.v = strings + + return u +} + +func (u *_textToAnalyze) TextToAnalyzeCaster() *types.TextToAnalyze { + return &u.v +} diff --git a/typedapi/esdsl/thaianalyzer.go b/typedapi/esdsl/thaianalyzer.go new file mode 100644 index 0000000000..29b36e8617 --- /dev/null +++ b/typedapi/esdsl/thaianalyzer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _thaiAnalyzer struct { + v *types.ThaiAnalyzer +} + +func NewThaiAnalyzer() *_thaiAnalyzer { + + return &_thaiAnalyzer{v: types.NewThaiAnalyzer()} + +} + +func (s *_thaiAnalyzer) Stopwords(stopwords ...string) *_thaiAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_thaiAnalyzer) StopwordsPath(stopwordspath string) *_thaiAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_thaiAnalyzer) ThaiAnalyzerCaster() *types.ThaiAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/thaitokenizer.go b/typedapi/esdsl/thaitokenizer.go new file mode 100644 index 0000000000..c7e340313f --- /dev/null +++ b/typedapi/esdsl/thaitokenizer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _thaiTokenizer struct { + v *types.ThaiTokenizer +} + +func NewThaiTokenizer() *_thaiTokenizer { + + return &_thaiTokenizer{v: types.NewThaiTokenizer()} + +} + +func (s *_thaiTokenizer) Version(versionstring string) *_thaiTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_thaiTokenizer) ThaiTokenizerCaster() *types.ThaiTokenizer { + return s.v +} diff --git a/typedapi/esdsl/throttlestate.go b/typedapi/esdsl/throttlestate.go new file mode 100644 index 0000000000..4b380acfe1 --- /dev/null +++ b/typedapi/esdsl/throttlestate.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _throttleState struct { + v *types.ThrottleState +} + +func NewThrottleState(reason string) *_throttleState { + + tmp := &_throttleState{v: types.NewThrottleState()} + + tmp.Reason(reason) + + return tmp + +} + +func (s *_throttleState) Reason(reason string) *_throttleState { + + s.v.Reason = reason + + return s +} + +func (s *_throttleState) Timestamp(datetime types.DateTimeVariant) *_throttleState { + + s.v.Timestamp = *datetime.DateTimeCaster() + + return s +} + +func (s *_throttleState) ThrottleStateCaster() *types.ThrottleState { + return s.v +} diff --git a/typedapi/esdsl/timeofmonth.go b/typedapi/esdsl/timeofmonth.go new file mode 100644 index 0000000000..7681821604 --- /dev/null +++ b/typedapi/esdsl/timeofmonth.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _timeOfMonth struct { + v *types.TimeOfMonth +} + +func NewTimeOfMonth() *_timeOfMonth { + + return &_timeOfMonth{v: types.NewTimeOfMonth()} + +} + +func (s *_timeOfMonth) At(ats ...string) *_timeOfMonth { + + for _, v := range ats { + + s.v.At = append(s.v.At, v) + + } + return s +} + +func (s *_timeOfMonth) On(ons ...int) *_timeOfMonth { + + for _, v := range ons { + + s.v.On = append(s.v.On, v) + + } + return s +} + +func (s *_timeOfMonth) TimeOfMonthCaster() *types.TimeOfMonth { + return s.v +} diff --git a/typedapi/esdsl/timeofweek.go b/typedapi/esdsl/timeofweek.go new file mode 100644 index 0000000000..875d7a1365 --- /dev/null +++ b/typedapi/esdsl/timeofweek.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/day" +) + +type _timeOfWeek struct { + v *types.TimeOfWeek +} + +func NewTimeOfWeek() *_timeOfWeek { + + return &_timeOfWeek{v: types.NewTimeOfWeek()} + +} + +func (s *_timeOfWeek) At(ats ...string) *_timeOfWeek { + + for _, v := range ats { + + s.v.At = append(s.v.At, v) + + } + return s +} + +func (s *_timeOfWeek) On(ons ...day.Day) *_timeOfWeek { + + for _, v := range ons { + + s.v.On = append(s.v.On, v) + + } + return s +} + +func (s *_timeOfWeek) TimeOfWeekCaster() *types.TimeOfWeek { + return s.v +} diff --git a/typedapi/esdsl/timeofyear.go b/typedapi/esdsl/timeofyear.go new file mode 100644 index 0000000000..2174d48bf8 --- /dev/null +++ b/typedapi/esdsl/timeofyear.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/month" +) + +type _timeOfYear struct { + v *types.TimeOfYear +} + +func NewTimeOfYear() *_timeOfYear { + + return &_timeOfYear{v: types.NewTimeOfYear()} + +} + +func (s *_timeOfYear) At(ats ...string) *_timeOfYear { + + for _, v := range ats { + + s.v.At = append(s.v.At, v) + + } + return s +} + +func (s *_timeOfYear) Int(ints ...month.Month) *_timeOfYear { + + for _, v := range ints { + + s.v.Int = append(s.v.Int, v) + + } + return s +} + +func (s *_timeOfYear) On(ons ...int) *_timeOfYear { + + for _, v := range ons { + + s.v.On = append(s.v.On, v) + + } + return s +} + +func (s *_timeOfYear) TimeOfYearCaster() *types.TimeOfYear { + return s.v +} diff --git a/typedapi/esdsl/timeseriesaggregation.go b/typedapi/esdsl/timeseriesaggregation.go new file mode 100644 index 0000000000..9c362328dd --- /dev/null +++ b/typedapi/esdsl/timeseriesaggregation.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _timeSeriesAggregation struct { + v *types.TimeSeriesAggregation +} + +// The time series aggregation queries data created using a time series index. +// This is typically data such as metrics or other data streams with a time +// component, and requires creating an index using the time series mode. +func NewTimeSeriesAggregation() *_timeSeriesAggregation { + + return &_timeSeriesAggregation{v: types.NewTimeSeriesAggregation()} + +} + +// Set to `true` to associate a unique string key with each bucket and returns +// the ranges as a hash rather than an array. +func (s *_timeSeriesAggregation) Keyed(keyed bool) *_timeSeriesAggregation { + + s.v.Keyed = &keyed + + return s +} + +// The maximum number of results to return. +func (s *_timeSeriesAggregation) Size(size int) *_timeSeriesAggregation { + + s.v.Size = &size + + return s +} + +func (s *_timeSeriesAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.TimeSeries = s.v + + return container +} + +func (s *_timeSeriesAggregation) TimeSeriesAggregationCaster() *types.TimeSeriesAggregation { + return s.v +} diff --git a/typedapi/esdsl/timesync.go b/typedapi/esdsl/timesync.go new file mode 100644 index 0000000000..fe892b72cc --- /dev/null +++ b/typedapi/esdsl/timesync.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _timeSync struct { + v *types.TimeSync +} + +// Specifies that the transform uses a time field to synchronize the source and +// destination indices. +func NewTimeSync() *_timeSync { + + return &_timeSync{v: types.NewTimeSync()} + +} + +// The time delay between the current time and the latest input data time. +func (s *_timeSync) Delay(duration types.DurationVariant) *_timeSync { + + s.v.Delay = *duration.DurationCaster() + + return s +} + +// The date field that is used to identify new documents in the source. In +// general, it’s a good idea to use a field +// that contains the ingest timestamp. If you use a different field, you might +// need to set the delay such that it +// accounts for data transmission delays. +func (s *_timeSync) Field(field string) *_timeSync { + + s.v.Field = field + + return s +} + +func (s *_timeSync) SyncContainerCaster() *types.SyncContainer { + container := types.NewSyncContainer() + + container.Time = s.v + + return container +} + +func (s *_timeSync) TimeSyncCaster() *types.TimeSync { + return s.v +} diff --git a/typedapi/esdsl/tokencountproperty.go b/typedapi/esdsl/tokencountproperty.go new file mode 100644 index 0000000000..24469a33b2 --- /dev/null +++ b/typedapi/esdsl/tokencountproperty.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _tokenCountProperty struct { + v *types.TokenCountProperty +} + +func NewTokenCountProperty() *_tokenCountProperty { + + return &_tokenCountProperty{v: types.NewTokenCountProperty()} + +} + +func (s *_tokenCountProperty) Analyzer(analyzer string) *_tokenCountProperty { + + s.v.Analyzer = &analyzer + + return s +} + +func (s *_tokenCountProperty) Boost(boost types.Float64) *_tokenCountProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_tokenCountProperty) CopyTo(fields ...string) *_tokenCountProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_tokenCountProperty) DocValues(docvalues bool) *_tokenCountProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_tokenCountProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_tokenCountProperty { 
+ + s.v.Dynamic = &dynamic + return s +} + +func (s *_tokenCountProperty) EnablePositionIncrements(enablepositionincrements bool) *_tokenCountProperty { + + s.v.EnablePositionIncrements = &enablepositionincrements + + return s +} + +func (s *_tokenCountProperty) Fields(fields map[string]types.Property) *_tokenCountProperty { + + s.v.Fields = fields + return s +} + +func (s *_tokenCountProperty) AddField(key string, value types.PropertyVariant) *_tokenCountProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_tokenCountProperty) IgnoreAbove(ignoreabove int) *_tokenCountProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_tokenCountProperty) Index(index bool) *_tokenCountProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_tokenCountProperty) Meta(meta map[string]string) *_tokenCountProperty { + + s.v.Meta = meta + return s +} + +func (s *_tokenCountProperty) AddMeta(key string, value string) *_tokenCountProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_tokenCountProperty) NullValue(nullvalue types.Float64) *_tokenCountProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_tokenCountProperty) Properties(properties map[string]types.Property) *_tokenCountProperty { + + s.v.Properties = properties + return s +} + +func (s *_tokenCountProperty) AddProperty(key string, value types.PropertyVariant) *_tokenCountProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s 
*_tokenCountProperty) Store(store bool) *_tokenCountProperty { + + s.v.Store = &store + + return s +} + +func (s *_tokenCountProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_tokenCountProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_tokenCountProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_tokenCountProperty) TokenCountPropertyCaster() *types.TokenCountProperty { + return s.v +} diff --git a/typedapi/esdsl/tokenfilter.go b/typedapi/esdsl/tokenfilter.go new file mode 100644 index 0000000000..9172c98c43 --- /dev/null +++ b/typedapi/esdsl/tokenfilter.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _tokenFilter struct { + v types.TokenFilter +} + +func NewTokenFilter() *_tokenFilter { + return &_tokenFilter{v: nil} +} + +func (u *_tokenFilter) String(string string) *_tokenFilter { + + u.v = &string + + return u +} + +func (u *_tokenFilter) TokenFilterDefinition(tokenfilterdefinition types.TokenFilterDefinitionVariant) *_tokenFilter { + + u.v = *tokenfilterdefinition.TokenFilterDefinitionCaster() + + return u +} + +// Interface implementation for TokenFilterDefinition in TokenFilter union +func (u *_tokenFilterDefinition) TokenFilterCaster() *types.TokenFilter { + t := types.TokenFilter(u.v) + return &t +} + +func (u *_tokenFilter) TokenFilterCaster() *types.TokenFilter { + return &u.v +} diff --git a/typedapi/esdsl/tokenfilterdefinition.go b/typedapi/esdsl/tokenfilterdefinition.go new file mode 100644 index 0000000000..22d500cea5 --- /dev/null +++ b/typedapi/esdsl/tokenfilterdefinition.go @@ -0,0 +1,647 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _tokenFilterDefinition struct { + v types.TokenFilterDefinition +} + +func NewTokenFilterDefinition() *_tokenFilterDefinition { + return &_tokenFilterDefinition{v: nil} +} + +func (u *_tokenFilterDefinition) AsciiFoldingTokenFilter(asciifoldingtokenfilter types.AsciiFoldingTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &asciifoldingtokenfilter + + return u +} + +// Interface implementation for AsciiFoldingTokenFilter in TokenFilterDefinition union +func (u *_asciiFoldingTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) CommonGramsTokenFilter(commongramstokenfilter types.CommonGramsTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &commongramstokenfilter + + return u +} + +// Interface implementation for CommonGramsTokenFilter in TokenFilterDefinition union +func (u *_commonGramsTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) ConditionTokenFilter(conditiontokenfilter types.ConditionTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &conditiontokenfilter + + return u +} + +// Interface implementation for ConditionTokenFilter in TokenFilterDefinition union +func (u *_conditionTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) DelimitedPayloadTokenFilter(delimitedpayloadtokenfilter types.DelimitedPayloadTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &delimitedpayloadtokenfilter + + return u +} + +// Interface implementation for 
DelimitedPayloadTokenFilter in TokenFilterDefinition union +func (u *_delimitedPayloadTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) EdgeNGramTokenFilter(edgengramtokenfilter types.EdgeNGramTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &edgengramtokenfilter + + return u +} + +// Interface implementation for EdgeNGramTokenFilter in TokenFilterDefinition union +func (u *_edgeNGramTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) ElisionTokenFilter(elisiontokenfilter types.ElisionTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &elisiontokenfilter + + return u +} + +// Interface implementation for ElisionTokenFilter in TokenFilterDefinition union +func (u *_elisionTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) FingerprintTokenFilter(fingerprinttokenfilter types.FingerprintTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &fingerprinttokenfilter + + return u +} + +// Interface implementation for FingerprintTokenFilter in TokenFilterDefinition union +func (u *_fingerprintTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) HunspellTokenFilter(hunspelltokenfilter types.HunspellTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &hunspelltokenfilter + + return u +} + +// Interface implementation for HunspellTokenFilter in TokenFilterDefinition union +func (u *_hunspellTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) 
HyphenationDecompounderTokenFilter(hyphenationdecompoundertokenfilter types.HyphenationDecompounderTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &hyphenationdecompoundertokenfilter + + return u +} + +// Interface implementation for HyphenationDecompounderTokenFilter in TokenFilterDefinition union +func (u *_hyphenationDecompounderTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KeepTypesTokenFilter(keeptypestokenfilter types.KeepTypesTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &keeptypestokenfilter + + return u +} + +// Interface implementation for KeepTypesTokenFilter in TokenFilterDefinition union +func (u *_keepTypesTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KeepWordsTokenFilter(keepwordstokenfilter types.KeepWordsTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &keepwordstokenfilter + + return u +} + +// Interface implementation for KeepWordsTokenFilter in TokenFilterDefinition union +func (u *_keepWordsTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KeywordMarkerTokenFilter(keywordmarkertokenfilter types.KeywordMarkerTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &keywordmarkertokenfilter + + return u +} + +// Interface implementation for KeywordMarkerTokenFilter in TokenFilterDefinition union +func (u *_keywordMarkerTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KStemTokenFilter(kstemtokenfilter types.KStemTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &kstemtokenfilter + + return u +} + +// Interface implementation for KStemTokenFilter in 
TokenFilterDefinition union +func (u *_kStemTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) LengthTokenFilter(lengthtokenfilter types.LengthTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &lengthtokenfilter + + return u +} + +// Interface implementation for LengthTokenFilter in TokenFilterDefinition union +func (u *_lengthTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) LimitTokenCountTokenFilter(limittokencounttokenfilter types.LimitTokenCountTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &limittokencounttokenfilter + + return u +} + +// Interface implementation for LimitTokenCountTokenFilter in TokenFilterDefinition union +func (u *_limitTokenCountTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) LowercaseTokenFilter(lowercasetokenfilter types.LowercaseTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &lowercasetokenfilter + + return u +} + +// Interface implementation for LowercaseTokenFilter in TokenFilterDefinition union +func (u *_lowercaseTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) MultiplexerTokenFilter(multiplexertokenfilter types.MultiplexerTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &multiplexertokenfilter + + return u +} + +// Interface implementation for MultiplexerTokenFilter in TokenFilterDefinition union +func (u *_multiplexerTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) NGramTokenFilter(ngramtokenfilter types.NGramTokenFilterVariant) 
*_tokenFilterDefinition { + + u.v = &ngramtokenfilter + + return u +} + +// Interface implementation for NGramTokenFilter in TokenFilterDefinition union +func (u *_nGramTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) NoriPartOfSpeechTokenFilter(noripartofspeechtokenfilter types.NoriPartOfSpeechTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &noripartofspeechtokenfilter + + return u +} + +// Interface implementation for NoriPartOfSpeechTokenFilter in TokenFilterDefinition union +func (u *_noriPartOfSpeechTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) PatternCaptureTokenFilter(patterncapturetokenfilter types.PatternCaptureTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &patterncapturetokenfilter + + return u +} + +// Interface implementation for PatternCaptureTokenFilter in TokenFilterDefinition union +func (u *_patternCaptureTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) PatternReplaceTokenFilter(patternreplacetokenfilter types.PatternReplaceTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &patternreplacetokenfilter + + return u +} + +// Interface implementation for PatternReplaceTokenFilter in TokenFilterDefinition union +func (u *_patternReplaceTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) PorterStemTokenFilter(porterstemtokenfilter types.PorterStemTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &porterstemtokenfilter + + return u +} + +// Interface implementation for PorterStemTokenFilter in TokenFilterDefinition union +func (u *_porterStemTokenFilter) 
TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) PredicateTokenFilter(predicatetokenfilter types.PredicateTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &predicatetokenfilter + + return u +} + +// Interface implementation for PredicateTokenFilter in TokenFilterDefinition union +func (u *_predicateTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) RemoveDuplicatesTokenFilter(removeduplicatestokenfilter types.RemoveDuplicatesTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &removeduplicatestokenfilter + + return u +} + +// Interface implementation for RemoveDuplicatesTokenFilter in TokenFilterDefinition union +func (u *_removeDuplicatesTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) ReverseTokenFilter(reversetokenfilter types.ReverseTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &reversetokenfilter + + return u +} + +// Interface implementation for ReverseTokenFilter in TokenFilterDefinition union +func (u *_reverseTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) ShingleTokenFilter(shingletokenfilter types.ShingleTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &shingletokenfilter + + return u +} + +// Interface implementation for ShingleTokenFilter in TokenFilterDefinition union +func (u *_shingleTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) SnowballTokenFilter(snowballtokenfilter types.SnowballTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &snowballtokenfilter + + 
return u +} + +// Interface implementation for SnowballTokenFilter in TokenFilterDefinition union +func (u *_snowballTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) StemmerOverrideTokenFilter(stemmeroverridetokenfilter types.StemmerOverrideTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &stemmeroverridetokenfilter + + return u +} + +// Interface implementation for StemmerOverrideTokenFilter in TokenFilterDefinition union +func (u *_stemmerOverrideTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) StemmerTokenFilter(stemmertokenfilter types.StemmerTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &stemmertokenfilter + + return u +} + +// Interface implementation for StemmerTokenFilter in TokenFilterDefinition union +func (u *_stemmerTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) StopTokenFilter(stoptokenfilter types.StopTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &stoptokenfilter + + return u +} + +// Interface implementation for StopTokenFilter in TokenFilterDefinition union +func (u *_stopTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) SynonymGraphTokenFilter(synonymgraphtokenfilter types.SynonymGraphTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &synonymgraphtokenfilter + + return u +} + +// Interface implementation for SynonymGraphTokenFilter in TokenFilterDefinition union +func (u *_synonymGraphTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) 
SynonymTokenFilter(synonymtokenfilter types.SynonymTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &synonymtokenfilter + + return u +} + +// Interface implementation for SynonymTokenFilter in TokenFilterDefinition union +func (u *_synonymTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) TrimTokenFilter(trimtokenfilter types.TrimTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &trimtokenfilter + + return u +} + +// Interface implementation for TrimTokenFilter in TokenFilterDefinition union +func (u *_trimTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) TruncateTokenFilter(truncatetokenfilter types.TruncateTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &truncatetokenfilter + + return u +} + +// Interface implementation for TruncateTokenFilter in TokenFilterDefinition union +func (u *_truncateTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) UniqueTokenFilter(uniquetokenfilter types.UniqueTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &uniquetokenfilter + + return u +} + +// Interface implementation for UniqueTokenFilter in TokenFilterDefinition union +func (u *_uniqueTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) UppercaseTokenFilter(uppercasetokenfilter types.UppercaseTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &uppercasetokenfilter + + return u +} + +// Interface implementation for UppercaseTokenFilter in TokenFilterDefinition union +func (u *_uppercaseTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + 
return &t +} + +func (u *_tokenFilterDefinition) WordDelimiterGraphTokenFilter(worddelimitergraphtokenfilter types.WordDelimiterGraphTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &worddelimitergraphtokenfilter + + return u +} + +// Interface implementation for WordDelimiterGraphTokenFilter in TokenFilterDefinition union +func (u *_wordDelimiterGraphTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) WordDelimiterTokenFilter(worddelimitertokenfilter types.WordDelimiterTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &worddelimitertokenfilter + + return u +} + +// Interface implementation for WordDelimiterTokenFilter in TokenFilterDefinition union +func (u *_wordDelimiterTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KuromojiStemmerTokenFilter(kuromojistemmertokenfilter types.KuromojiStemmerTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &kuromojistemmertokenfilter + + return u +} + +// Interface implementation for KuromojiStemmerTokenFilter in TokenFilterDefinition union +func (u *_kuromojiStemmerTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KuromojiReadingFormTokenFilter(kuromojireadingformtokenfilter types.KuromojiReadingFormTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &kuromojireadingformtokenfilter + + return u +} + +// Interface implementation for KuromojiReadingFormTokenFilter in TokenFilterDefinition union +func (u *_kuromojiReadingFormTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) KuromojiPartOfSpeechTokenFilter(kuromojipartofspeechtokenfilter 
types.KuromojiPartOfSpeechTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &kuromojipartofspeechtokenfilter + + return u +} + +// Interface implementation for KuromojiPartOfSpeechTokenFilter in TokenFilterDefinition union +func (u *_kuromojiPartOfSpeechTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) IcuCollationTokenFilter(icucollationtokenfilter types.IcuCollationTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &icucollationtokenfilter + + return u +} + +// Interface implementation for IcuCollationTokenFilter in TokenFilterDefinition union +func (u *_icuCollationTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) IcuFoldingTokenFilter(icufoldingtokenfilter types.IcuFoldingTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &icufoldingtokenfilter + + return u +} + +// Interface implementation for IcuFoldingTokenFilter in TokenFilterDefinition union +func (u *_icuFoldingTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) IcuNormalizationTokenFilter(icunormalizationtokenfilter types.IcuNormalizationTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &icunormalizationtokenfilter + + return u +} + +// Interface implementation for IcuNormalizationTokenFilter in TokenFilterDefinition union +func (u *_icuNormalizationTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) IcuTransformTokenFilter(icutransformtokenfilter types.IcuTransformTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &icutransformtokenfilter + + return u +} + +// Interface implementation for IcuTransformTokenFilter in 
TokenFilterDefinition union +func (u *_icuTransformTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) PhoneticTokenFilter(phonetictokenfilter types.PhoneticTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &phonetictokenfilter + + return u +} + +// Interface implementation for PhoneticTokenFilter in TokenFilterDefinition union +func (u *_phoneticTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) DictionaryDecompounderTokenFilter(dictionarydecompoundertokenfilter types.DictionaryDecompounderTokenFilterVariant) *_tokenFilterDefinition { + + u.v = &dictionarydecompoundertokenfilter + + return u +} + +// Interface implementation for DictionaryDecompounderTokenFilter in TokenFilterDefinition union +func (u *_dictionaryDecompounderTokenFilter) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + t := types.TokenFilterDefinition(u.v) + return &t +} + +func (u *_tokenFilterDefinition) TokenFilterDefinitionCaster() *types.TokenFilterDefinition { + return &u.v +} diff --git a/typedapi/esdsl/tokenizationconfigcontainer.go b/typedapi/esdsl/tokenizationconfigcontainer.go new file mode 100644 index 0000000000..a1bbcb5a05 --- /dev/null +++ b/typedapi/esdsl/tokenizationconfigcontainer.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _tokenizationConfigContainer struct { + v *types.TokenizationConfigContainer +} + +func NewTokenizationConfigContainer() *_tokenizationConfigContainer { + return &_tokenizationConfigContainer{v: types.NewTokenizationConfigContainer()} +} + +// AdditionalTokenizationConfigContainerProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_tokenizationConfigContainer) AdditionalTokenizationConfigContainerProperty(key string, value json.RawMessage) *_tokenizationConfigContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalTokenizationConfigContainerProperty = tmp + return s +} + +// Indicates BERT tokenization and its options +func (s *_tokenizationConfigContainer) Bert(bert types.NlpBertTokenizationConfigVariant) *_tokenizationConfigContainer { + + s.v.Bert = bert.NlpBertTokenizationConfigCaster() + + return s +} + +// Indicates BERT Japanese tokenization and its options +func (s *_tokenizationConfigContainer) BertJa(bertja types.NlpBertTokenizationConfigVariant) *_tokenizationConfigContainer { + + s.v.BertJa = bertja.NlpBertTokenizationConfigCaster() + + return s +} + +// Indicates MPNET tokenization and its options +func (s *_tokenizationConfigContainer) Mpnet(mpnet types.NlpBertTokenizationConfigVariant) *_tokenizationConfigContainer { + + s.v.Mpnet = mpnet.NlpBertTokenizationConfigCaster() + + return s +} + +// Indicates RoBERTa tokenization and its options +func (s *_tokenizationConfigContainer) Roberta(roberta types.NlpRobertaTokenizationConfigVariant) *_tokenizationConfigContainer { + + s.v.Roberta = roberta.NlpRobertaTokenizationConfigCaster() + + return s +} + +func (s *_tokenizationConfigContainer) TokenizationConfigContainerCaster() *types.TokenizationConfigContainer { + return s.v +} diff --git a/typedapi/esdsl/tokenizer.go b/typedapi/esdsl/tokenizer.go new file mode 100644 index 0000000000..ff59146cdd --- /dev/null +++ b/typedapi/esdsl/tokenizer.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _tokenizer struct { + v types.Tokenizer +} + +func NewTokenizer() *_tokenizer { + return &_tokenizer{v: nil} +} + +func (u *_tokenizer) String(string string) *_tokenizer { + + u.v = &string + + return u +} + +func (u *_tokenizer) TokenizerDefinition(tokenizerdefinition types.TokenizerDefinitionVariant) *_tokenizer { + + u.v = *tokenizerdefinition.TokenizerDefinitionCaster() + + return u +} + +// Interface implementation for TokenizerDefinition in Tokenizer union +func (u *_tokenizerDefinition) TokenizerCaster() *types.Tokenizer { + t := types.Tokenizer(u.v) + return &t +} + +func (u *_tokenizer) TokenizerCaster() *types.Tokenizer { + return &u.v +} diff --git a/typedapi/esdsl/tokenizerdefinition.go b/typedapi/esdsl/tokenizerdefinition.go new file mode 100644 index 0000000000..88475dd982 --- /dev/null +++ b/typedapi/esdsl/tokenizerdefinition.go @@ -0,0 +1,270 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. +type _tokenizerDefinition struct { + v types.TokenizerDefinition +} + +func NewTokenizerDefinition() *_tokenizerDefinition { + return &_tokenizerDefinition{v: nil} +} + +func (u *_tokenizerDefinition) CharGroupTokenizer(chargrouptokenizer types.CharGroupTokenizerVariant) *_tokenizerDefinition { + + u.v = &chargrouptokenizer + + return u +} + +// Interface implementation for CharGroupTokenizer in TokenizerDefinition union +func (u *_charGroupTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) ClassicTokenizer(classictokenizer types.ClassicTokenizerVariant) *_tokenizerDefinition { + + u.v = &classictokenizer + + return u +} + +// Interface implementation for ClassicTokenizer in TokenizerDefinition union +func (u *_classicTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) EdgeNGramTokenizer(edgengramtokenizer types.EdgeNGramTokenizerVariant) *_tokenizerDefinition { + + u.v = 
&edgengramtokenizer + + return u +} + +// Interface implementation for EdgeNGramTokenizer in TokenizerDefinition union +func (u *_edgeNGramTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) KeywordTokenizer(keywordtokenizer types.KeywordTokenizerVariant) *_tokenizerDefinition { + + u.v = &keywordtokenizer + + return u +} + +// Interface implementation for KeywordTokenizer in TokenizerDefinition union +func (u *_keywordTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) LetterTokenizer(lettertokenizer types.LetterTokenizerVariant) *_tokenizerDefinition { + + u.v = &lettertokenizer + + return u +} + +// Interface implementation for LetterTokenizer in TokenizerDefinition union +func (u *_letterTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) LowercaseTokenizer(lowercasetokenizer types.LowercaseTokenizerVariant) *_tokenizerDefinition { + + u.v = &lowercasetokenizer + + return u +} + +// Interface implementation for LowercaseTokenizer in TokenizerDefinition union +func (u *_lowercaseTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) NGramTokenizer(ngramtokenizer types.NGramTokenizerVariant) *_tokenizerDefinition { + + u.v = &ngramtokenizer + + return u +} + +// Interface implementation for NGramTokenizer in TokenizerDefinition union +func (u *_nGramTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) PathHierarchyTokenizer(pathhierarchytokenizer types.PathHierarchyTokenizerVariant) *_tokenizerDefinition { + + u.v = &pathhierarchytokenizer + + return u +} + +// 
Interface implementation for PathHierarchyTokenizer in TokenizerDefinition union +func (u *_pathHierarchyTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) PatternTokenizer(patterntokenizer types.PatternTokenizerVariant) *_tokenizerDefinition { + + u.v = &patterntokenizer + + return u +} + +// Interface implementation for PatternTokenizer in TokenizerDefinition union +func (u *_patternTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) SimplePatternTokenizer(simplepatterntokenizer types.SimplePatternTokenizerVariant) *_tokenizerDefinition { + + u.v = &simplepatterntokenizer + + return u +} + +// Interface implementation for SimplePatternTokenizer in TokenizerDefinition union +func (u *_simplePatternTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) SimplePatternSplitTokenizer(simplepatternsplittokenizer types.SimplePatternSplitTokenizerVariant) *_tokenizerDefinition { + + u.v = &simplepatternsplittokenizer + + return u +} + +// Interface implementation for SimplePatternSplitTokenizer in TokenizerDefinition union +func (u *_simplePatternSplitTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) StandardTokenizer(standardtokenizer types.StandardTokenizerVariant) *_tokenizerDefinition { + + u.v = &standardtokenizer + + return u +} + +// Interface implementation for StandardTokenizer in TokenizerDefinition union +func (u *_standardTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) ThaiTokenizer(thaitokenizer types.ThaiTokenizerVariant) *_tokenizerDefinition { + + 
u.v = &thaitokenizer + + return u +} + +// Interface implementation for ThaiTokenizer in TokenizerDefinition union +func (u *_thaiTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) UaxEmailUrlTokenizer(uaxemailurltokenizer types.UaxEmailUrlTokenizerVariant) *_tokenizerDefinition { + + u.v = &uaxemailurltokenizer + + return u +} + +// Interface implementation for UaxEmailUrlTokenizer in TokenizerDefinition union +func (u *_uaxEmailUrlTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) WhitespaceTokenizer(whitespacetokenizer types.WhitespaceTokenizerVariant) *_tokenizerDefinition { + + u.v = &whitespacetokenizer + + return u +} + +// Interface implementation for WhitespaceTokenizer in TokenizerDefinition union +func (u *_whitespaceTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) IcuTokenizer(icutokenizer types.IcuTokenizerVariant) *_tokenizerDefinition { + + u.v = &icutokenizer + + return u +} + +// Interface implementation for IcuTokenizer in TokenizerDefinition union +func (u *_icuTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) KuromojiTokenizer(kuromojitokenizer types.KuromojiTokenizerVariant) *_tokenizerDefinition { + + u.v = &kuromojitokenizer + + return u +} + +// Interface implementation for KuromojiTokenizer in TokenizerDefinition union +func (u *_kuromojiTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) NoriTokenizer(noritokenizer types.NoriTokenizerVariant) *_tokenizerDefinition { + + u.v = &noritokenizer + + return u +} + +// Interface 
implementation for NoriTokenizer in TokenizerDefinition union +func (u *_noriTokenizer) TokenizerDefinitionCaster() *types.TokenizerDefinition { + t := types.TokenizerDefinition(u.v) + return &t +} + +func (u *_tokenizerDefinition) TokenizerDefinitionCaster() *types.TokenizerDefinition { + return &u.v +} diff --git a/typedapi/esdsl/tokenpruningconfig.go b/typedapi/esdsl/tokenpruningconfig.go new file mode 100644 index 0000000000..ef91b27226 --- /dev/null +++ b/typedapi/esdsl/tokenpruningconfig.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _tokenPruningConfig struct { + v *types.TokenPruningConfig +} + +// Optional pruning configuration. +// If enabled, this will omit non-significant tokens from the query in order to +// improve query performance. +// This is only used if prune is set to true. +// If prune is set to true but pruning_config is not specified, default values +// will be used. 
+func NewTokenPruningConfig() *_tokenPruningConfig { + + return &_tokenPruningConfig{v: types.NewTokenPruningConfig()} + +} + +// Whether to only score pruned tokens, vs only scoring kept tokens. +func (s *_tokenPruningConfig) OnlyScorePrunedTokens(onlyscoreprunedtokens bool) *_tokenPruningConfig { + + s.v.OnlyScorePrunedTokens = &onlyscoreprunedtokens + + return s +} + +// Tokens whose frequency is more than this threshold times the average +// frequency of all tokens in the specified field are considered outliers and +// pruned. +func (s *_tokenPruningConfig) TokensFreqRatioThreshold(tokensfreqratiothreshold int) *_tokenPruningConfig { + + s.v.TokensFreqRatioThreshold = &tokensfreqratiothreshold + + return s +} + +// Tokens whose weight is less than this threshold are considered nonsignificant +// and pruned. +func (s *_tokenPruningConfig) TokensWeightThreshold(tokensweightthreshold float32) *_tokenPruningConfig { + + s.v.TokensWeightThreshold = &tokensweightthreshold + + return s +} + +func (s *_tokenPruningConfig) SparseVectorQueryCaster() *types.SparseVectorQuery { + container := types.NewSparseVectorQuery() + + container.PruningConfig = s.v + + return container +} + +func (s *_tokenPruningConfig) TokenPruningConfigCaster() *types.TokenPruningConfig { + return s.v +} diff --git a/typedapi/esdsl/toolcall.go b/typedapi/esdsl/toolcall.go new file mode 100644 index 0000000000..59b6b9923e --- /dev/null +++ b/typedapi/esdsl/toolcall.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _toolCall struct { + v *types.ToolCall +} + +func NewToolCall(function types.ToolCallFunctionVariant, type_ string) *_toolCall { + + tmp := &_toolCall{v: types.NewToolCall()} + + tmp.Function(function) + + tmp.Type(type_) + + return tmp + +} + +// The function that the model called. +func (s *_toolCall) Function(function types.ToolCallFunctionVariant) *_toolCall { + + s.v.Function = *function.ToolCallFunctionCaster() + + return s +} + +// The identifier of the tool call. +func (s *_toolCall) Id(id string) *_toolCall { + + s.v.Id = id + + return s +} + +// The type of the tool call. +func (s *_toolCall) Type(type_ string) *_toolCall { + + s.v.Type = type_ + + return s +} + +func (s *_toolCall) ToolCallCaster() *types.ToolCall { + return s.v +} diff --git a/typedapi/esdsl/toolcallfunction.go b/typedapi/esdsl/toolcallfunction.go new file mode 100644 index 0000000000..b08e97d365 --- /dev/null +++ b/typedapi/esdsl/toolcallfunction.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _toolCallFunction struct { + v *types.ToolCallFunction +} + +func NewToolCallFunction(arguments string, name string) *_toolCallFunction { + + tmp := &_toolCallFunction{v: types.NewToolCallFunction()} + + tmp.Arguments(arguments) + + tmp.Name(name) + + return tmp + +} + +// The arguments to call the function with in JSON format. +func (s *_toolCallFunction) Arguments(arguments string) *_toolCallFunction { + + s.v.Arguments = arguments + + return s +} + +// The name of the function to call. +func (s *_toolCallFunction) Name(name string) *_toolCallFunction { + + s.v.Name = name + + return s +} + +func (s *_toolCallFunction) ToolCallFunctionCaster() *types.ToolCallFunction { + return s.v +} diff --git a/typedapi/esdsl/tophitsaggregation.go b/typedapi/esdsl/tophitsaggregation.go new file mode 100644 index 0000000000..5e2e03d6b2 --- /dev/null +++ b/typedapi/esdsl/tophitsaggregation.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _topHitsAggregation struct { + v *types.TopHitsAggregation +} + +// A metric aggregation that returns the top matching documents per bucket. +func NewTopHitsAggregation() *_topHitsAggregation { + + return &_topHitsAggregation{v: types.NewTopHitsAggregation()} + +} + +// Fields for which to return doc values. +func (s *_topHitsAggregation) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *_topHitsAggregation { + + for _, v := range docvaluefields { + + s.v.DocvalueFields = append(s.v.DocvalueFields, *v.FieldAndFormatCaster()) + + } + return s +} + +// If `true`, returns detailed information about score computation as part of a +// hit. +func (s *_topHitsAggregation) Explain(explain bool) *_topHitsAggregation { + + s.v.Explain = &explain + + return s +} + +// The field on which to run the aggregation. +func (s *_topHitsAggregation) Field(field string) *_topHitsAggregation { + + s.v.Field = &field + + return s +} + +// Array of wildcard (*) patterns. The request returns values for field names +// matching these patterns in the hits.fields property of the response. 
+func (s *_topHitsAggregation) Fields(fields ...types.FieldAndFormatVariant) *_topHitsAggregation { + + for _, v := range fields { + + s.v.Fields = append(s.v.Fields, *v.FieldAndFormatCaster()) + + } + return s +} + +// Starting document offset. +func (s *_topHitsAggregation) From(from int) *_topHitsAggregation { + + s.v.From = &from + + return s +} + +// Specifies the highlighter to use for retrieving highlighted snippets from one +// or more fields in the search results. +func (s *_topHitsAggregation) Highlight(highlight types.HighlightVariant) *_topHitsAggregation { + + s.v.Highlight = highlight.HighlightCaster() + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. +func (s *_topHitsAggregation) Missing(missing types.MissingVariant) *_topHitsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_topHitsAggregation) Script(script types.ScriptVariant) *_topHitsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Returns the result of one or more script evaluations for each hit. +func (s *_topHitsAggregation) ScriptFields(scriptfields map[string]types.ScriptField) *_topHitsAggregation { + + s.v.ScriptFields = scriptfields + return s +} + +func (s *_topHitsAggregation) AddScriptField(key string, value types.ScriptFieldVariant) *_topHitsAggregation { + + var tmp map[string]types.ScriptField + if s.v.ScriptFields == nil { + s.v.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = s.v.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + s.v.ScriptFields = tmp + return s +} + +// If `true`, returns sequence number and primary term of the last modification +// of each hit. +func (s *_topHitsAggregation) SeqNoPrimaryTerm(seqnoprimaryterm bool) *_topHitsAggregation { + + s.v.SeqNoPrimaryTerm = &seqnoprimaryterm + + return s +} + +// The maximum number of top matching hits to return per bucket. 
+func (s *_topHitsAggregation) Size(size int) *_topHitsAggregation { + + s.v.Size = &size + + return s +} + +// Sort order of the top matching hits. +// By default, the hits are sorted by the score of the main query. +func (s *_topHitsAggregation) Sort(sorts ...types.SortCombinationsVariant) *_topHitsAggregation { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +// Selects the fields of the source that are returned. +func (s *_topHitsAggregation) Source_(sourceconfig types.SourceConfigVariant) *_topHitsAggregation { + + s.v.Source_ = *sourceconfig.SourceConfigCaster() + + return s +} + +// Returns values for the specified stored fields (fields that use the `store` +// mapping option). +func (s *_topHitsAggregation) StoredFields(fields ...string) *_topHitsAggregation { + + s.v.StoredFields = fields + + return s +} + +// If `true`, calculates and returns document scores, even if the scores are not +// used for sorting. +func (s *_topHitsAggregation) TrackScores(trackscores bool) *_topHitsAggregation { + + s.v.TrackScores = &trackscores + + return s +} + +// If `true`, returns document version as part of a hit. +func (s *_topHitsAggregation) Version(version bool) *_topHitsAggregation { + + s.v.Version = &version + + return s +} + +func (s *_topHitsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.TopHits = s.v + + return container +} + +func (s *_topHitsAggregation) TopHitsAggregationCaster() *types.TopHitsAggregation { + return s.v +} diff --git a/typedapi/esdsl/topleftbottomrightgeobounds.go b/typedapi/esdsl/topleftbottomrightgeobounds.go new file mode 100644 index 0000000000..8c45c76482 --- /dev/null +++ b/typedapi/esdsl/topleftbottomrightgeobounds.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _topLeftBottomRightGeoBounds struct { + v *types.TopLeftBottomRightGeoBounds +} + +func NewTopLeftBottomRightGeoBounds() *_topLeftBottomRightGeoBounds { + + return &_topLeftBottomRightGeoBounds{v: types.NewTopLeftBottomRightGeoBounds()} + +} + +func (s *_topLeftBottomRightGeoBounds) BottomRight(geolocation types.GeoLocationVariant) *_topLeftBottomRightGeoBounds { + + s.v.BottomRight = *geolocation.GeoLocationCaster() + + return s +} + +func (s *_topLeftBottomRightGeoBounds) TopLeft(geolocation types.GeoLocationVariant) *_topLeftBottomRightGeoBounds { + + s.v.TopLeft = *geolocation.GeoLocationCaster() + + return s +} + +func (s *_topLeftBottomRightGeoBounds) TopLeftBottomRightGeoBoundsCaster() *types.TopLeftBottomRightGeoBounds { + return s.v +} diff --git a/typedapi/esdsl/topmetricsaggregation.go b/typedapi/esdsl/topmetricsaggregation.go new file mode 100644 index 0000000000..d75aaaa339 --- /dev/null +++ b/typedapi/esdsl/topmetricsaggregation.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _topMetricsAggregation struct { + v *types.TopMetricsAggregation +} + +// A metric aggregation that selects metrics from the document with the largest +// or smallest sort value. +func NewTopMetricsAggregation() *_topMetricsAggregation { + + return &_topMetricsAggregation{v: types.NewTopMetricsAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_topMetricsAggregation) Field(field string) *_topMetricsAggregation { + + s.v.Field = &field + + return s +} + +// The fields of the top document to return. +func (s *_topMetricsAggregation) Metrics(metrics ...types.TopMetricsValueVariant) *_topMetricsAggregation { + + s.v.Metrics = make([]types.TopMetricsValue, len(metrics)) + for i, v := range metrics { + s.v.Metrics[i] = *v.TopMetricsValueCaster() + } + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_topMetricsAggregation) Missing(missing types.MissingVariant) *_topMetricsAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_topMetricsAggregation) Script(script types.ScriptVariant) *_topMetricsAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// The number of top documents from which to return metrics. +func (s *_topMetricsAggregation) Size(size int) *_topMetricsAggregation { + + s.v.Size = &size + + return s +} + +// The sort order of the documents. +func (s *_topMetricsAggregation) Sort(sorts ...types.SortCombinationsVariant) *_topMetricsAggregation { + + for _, v := range sorts { + s.v.Sort = append(s.v.Sort, *v.SortCombinationsCaster()) + } + + return s +} + +func (s *_topMetricsAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.TopMetrics = s.v + + return container +} + +func (s *_topMetricsAggregation) TopMetricsAggregationCaster() *types.TopMetricsAggregation { + return s.v +} diff --git a/typedapi/esdsl/topmetricsvalue.go b/typedapi/esdsl/topmetricsvalue.go new file mode 100644 index 0000000000..5241f3b81c --- /dev/null +++ b/typedapi/esdsl/topmetricsvalue.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _topMetricsValue struct { + v *types.TopMetricsValue +} + +func NewTopMetricsValue() *_topMetricsValue { + + return &_topMetricsValue{v: types.NewTopMetricsValue()} + +} + +// A field to return as a metric. +func (s *_topMetricsValue) Field(field string) *_topMetricsValue { + + s.v.Field = field + + return s +} + +func (s *_topMetricsValue) TopMetricsValueCaster() *types.TopMetricsValue { + return s.v +} diff --git a/typedapi/esdsl/toprightbottomleftgeobounds.go b/typedapi/esdsl/toprightbottomleftgeobounds.go new file mode 100644 index 0000000000..ee58ac71d2 --- /dev/null +++ b/typedapi/esdsl/toprightbottomleftgeobounds.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _topRightBottomLeftGeoBounds struct { + v *types.TopRightBottomLeftGeoBounds +} + +func NewTopRightBottomLeftGeoBounds() *_topRightBottomLeftGeoBounds { + + return &_topRightBottomLeftGeoBounds{v: types.NewTopRightBottomLeftGeoBounds()} + +} + +func (s *_topRightBottomLeftGeoBounds) BottomLeft(geolocation types.GeoLocationVariant) *_topRightBottomLeftGeoBounds { + + s.v.BottomLeft = *geolocation.GeoLocationCaster() + + return s +} + +func (s *_topRightBottomLeftGeoBounds) TopRight(geolocation types.GeoLocationVariant) *_topRightBottomLeftGeoBounds { + + s.v.TopRight = *geolocation.GeoLocationCaster() + + return s +} + +func (s *_topRightBottomLeftGeoBounds) TopRightBottomLeftGeoBoundsCaster() *types.TopRightBottomLeftGeoBounds { + return s.v +} diff --git a/typedapi/esdsl/trackhits.go b/typedapi/esdsl/trackhits.go new file mode 100644 index 0000000000..8f82f815c3 --- /dev/null +++ b/typedapi/esdsl/trackhits.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This provides all the types that are part of the union. +type _trackHits struct { + v types.TrackHits +} + +func NewTrackHits() *_trackHits { + return &_trackHits{v: nil} +} + +func (u *_trackHits) Bool(bool bool) *_trackHits { + + u.v = &bool + + return u +} + +func (u *_trackHits) Int(int int) *_trackHits { + + u.v = &int + + return u +} + +func (u *_trackHits) TrackHitsCaster() *types.TrackHits { + return &u.v +} diff --git a/typedapi/esdsl/trainedmodel.go b/typedapi/esdsl/trainedmodel.go new file mode 100644 index 0000000000..99d279e315 --- /dev/null +++ b/typedapi/esdsl/trainedmodel.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trainedModel struct { + v *types.TrainedModel +} + +func NewTrainedModel() *_trainedModel { + + return &_trainedModel{v: types.NewTrainedModel()} + +} + +// The definition for an ensemble model +func (s *_trainedModel) Ensemble(ensemble types.EnsembleVariant) *_trainedModel { + + s.v.Ensemble = ensemble.EnsembleCaster() + + return s +} + +// The definition for a binary decision tree. +func (s *_trainedModel) Tree(tree types.TrainedModelTreeVariant) *_trainedModel { + + s.v.Tree = tree.TrainedModelTreeCaster() + + return s +} + +// The definition of a node in a tree. +// There are two major types of nodes: leaf nodes and not-leaf nodes. +// - Leaf nodes only need node_index and leaf_value defined. +// - All other nodes need split_feature, left_child, right_child, threshold, +// decision_type, and default_left defined. +func (s *_trainedModel) TreeNode(treenode types.TrainedModelTreeNodeVariant) *_trainedModel { + + s.v.TreeNode = treenode.TrainedModelTreeNodeCaster() + + return s +} + +func (s *_trainedModel) TrainedModelCaster() *types.TrainedModel { + return s.v +} diff --git a/typedapi/esdsl/trainedmodelprefixstrings.go b/typedapi/esdsl/trainedmodelprefixstrings.go new file mode 100644 index 0000000000..fce74f5cc1 --- /dev/null +++ b/typedapi/esdsl/trainedmodelprefixstrings.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trainedModelPrefixStrings struct { + v *types.TrainedModelPrefixStrings +} + +func NewTrainedModelPrefixStrings() *_trainedModelPrefixStrings { + + return &_trainedModelPrefixStrings{v: types.NewTrainedModelPrefixStrings()} + +} + +// String prepended to input at ingest +func (s *_trainedModelPrefixStrings) Ingest(ingest string) *_trainedModelPrefixStrings { + + s.v.Ingest = &ingest + + return s +} + +// String prepended to input at search +func (s *_trainedModelPrefixStrings) Search(search string) *_trainedModelPrefixStrings { + + s.v.Search = &search + + return s +} + +func (s *_trainedModelPrefixStrings) TrainedModelPrefixStringsCaster() *types.TrainedModelPrefixStrings { + return s.v +} diff --git a/typedapi/esdsl/trainedmodeltree.go b/typedapi/esdsl/trainedmodeltree.go new file mode 100644 index 0000000000..5e27f5ba1c --- /dev/null +++ b/typedapi/esdsl/trainedmodeltree.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trainedModelTree struct { + v *types.TrainedModelTree +} + +func NewTrainedModelTree() *_trainedModelTree { + + return &_trainedModelTree{v: types.NewTrainedModelTree()} + +} + +func (s *_trainedModelTree) ClassificationLabels(classificationlabels ...string) *_trainedModelTree { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +func (s *_trainedModelTree) FeatureNames(featurenames ...string) *_trainedModelTree { + + for _, v := range featurenames { + + s.v.FeatureNames = append(s.v.FeatureNames, v) + + } + return s +} + +func (s *_trainedModelTree) TargetType(targettype string) *_trainedModelTree { + + s.v.TargetType = &targettype + + return s +} + +func (s *_trainedModelTree) TreeStructure(treestructures ...types.TrainedModelTreeNodeVariant) *_trainedModelTree { + + for _, v := range treestructures { + + s.v.TreeStructure = append(s.v.TreeStructure, *v.TrainedModelTreeNodeCaster()) + + } + return s +} + +func (s *_trainedModelTree) TrainedModelTreeCaster() *types.TrainedModelTree { + return s.v +} diff --git a/typedapi/esdsl/trainedmodeltreenode.go b/typedapi/esdsl/trainedmodeltreenode.go new file mode 100644 index 0000000000..0d8f2fb415 --- /dev/null +++ b/typedapi/esdsl/trainedmodeltreenode.go 
@@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trainedModelTreeNode struct { + v *types.TrainedModelTreeNode +} + +func NewTrainedModelTreeNode(nodeindex int) *_trainedModelTreeNode { + + tmp := &_trainedModelTreeNode{v: types.NewTrainedModelTreeNode()} + + tmp.NodeIndex(nodeindex) + + return tmp + +} + +func (s *_trainedModelTreeNode) DecisionType(decisiontype string) *_trainedModelTreeNode { + + s.v.DecisionType = &decisiontype + + return s +} + +func (s *_trainedModelTreeNode) DefaultLeft(defaultleft bool) *_trainedModelTreeNode { + + s.v.DefaultLeft = &defaultleft + + return s +} + +func (s *_trainedModelTreeNode) LeafValue(leafvalue types.Float64) *_trainedModelTreeNode { + + s.v.LeafValue = &leafvalue + + return s +} + +func (s *_trainedModelTreeNode) LeftChild(leftchild int) *_trainedModelTreeNode { + + s.v.LeftChild = &leftchild + + return s +} + +func (s *_trainedModelTreeNode) NodeIndex(nodeindex int) *_trainedModelTreeNode { + + 
s.v.NodeIndex = nodeindex + + return s +} + +func (s *_trainedModelTreeNode) RightChild(rightchild int) *_trainedModelTreeNode { + + s.v.RightChild = &rightchild + + return s +} + +func (s *_trainedModelTreeNode) SplitFeature(splitfeature int) *_trainedModelTreeNode { + + s.v.SplitFeature = &splitfeature + + return s +} + +func (s *_trainedModelTreeNode) SplitGain(splitgain int) *_trainedModelTreeNode { + + s.v.SplitGain = &splitgain + + return s +} + +func (s *_trainedModelTreeNode) Threshold(threshold types.Float64) *_trainedModelTreeNode { + + s.v.Threshold = &threshold + + return s +} + +func (s *_trainedModelTreeNode) TrainedModelTreeNodeCaster() *types.TrainedModelTreeNode { + return s.v +} diff --git a/typedapi/esdsl/transformcontainer.go b/typedapi/esdsl/transformcontainer.go new file mode 100644 index 0000000000..84ccb249b1 --- /dev/null +++ b/typedapi/esdsl/transformcontainer.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _transformContainer struct { + v *types.TransformContainer +} + +func NewTransformContainer() *_transformContainer { + return &_transformContainer{v: types.NewTransformContainer()} +} + +// AdditionalTransformContainerProperty is a single key dictionary. +// It will replace the current value on each call. +func (s *_transformContainer) AdditionalTransformContainerProperty(key string, value json.RawMessage) *_transformContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalTransformContainerProperty = tmp + return s +} + +func (s *_transformContainer) Chain(chains ...types.TransformContainerVariant) *_transformContainer { + + for _, v := range chains { + + s.v.Chain = append(s.v.Chain, *v.TransformContainerCaster()) + + } + return s +} + +func (s *_transformContainer) Script(script types.ScriptTransformVariant) *_transformContainer { + + s.v.Script = script.ScriptTransformCaster() + + return s +} + +func (s *_transformContainer) Search(search types.SearchTransformVariant) *_transformContainer { + + s.v.Search = search.SearchTransformCaster() + + return s +} + +func (s *_transformContainer) TransformContainerCaster() *types.TransformContainer { + return s.v +} diff --git a/typedapi/esdsl/transformdestination.go b/typedapi/esdsl/transformdestination.go new file mode 100644 index 0000000000..09737bee7b --- /dev/null +++ b/typedapi/esdsl/transformdestination.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _transformDestination struct { + v *types.TransformDestination +} + +func NewTransformDestination() *_transformDestination { + + return &_transformDestination{v: types.NewTransformDestination()} + +} + +// The destination index for the transform. The mappings of the destination +// index are deduced based on the source +// fields when possible. If alternate mappings are required, use the create +// index API prior to starting the +// transform. +func (s *_transformDestination) Index(indexname string) *_transformDestination { + + s.v.Index = &indexname + + return s +} + +// The unique identifier for an ingest pipeline. +func (s *_transformDestination) Pipeline(pipeline string) *_transformDestination { + + s.v.Pipeline = &pipeline + + return s +} + +func (s *_transformDestination) TransformDestinationCaster() *types.TransformDestination { + return s.v +} diff --git a/typedapi/esdsl/transformsource.go b/typedapi/esdsl/transformsource.go new file mode 100644 index 0000000000..5cfc0f6924 --- /dev/null +++ b/typedapi/esdsl/transformsource.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _transformSource struct { + v *types.TransformSource +} + +func NewTransformSource() *_transformSource { + + return &_transformSource{v: types.NewTransformSource()} + +} + +// The source indices for the transform. It can be a single index, an index +// pattern (for example, `"my-index-*"`), an +// array of indices (for example, `["my-index-000001", "my-index-000002"]`), or +// an array of index patterns (for +// example, `["my-index-*", "my-other-index-*"]`). For remote indices use the +// syntax `"remote_name:index_name"`. If +// any indices are in remote clusters then the master node and at least one +// transform node must have the `remote_cluster_client` node role. +func (s *_transformSource) Index(indices ...string) *_transformSource { + + s.v.Index = indices + + return s +} + +// A query clause that retrieves a subset of data from the source index. 
+func (s *_transformSource) Query(query types.QueryVariant) *_transformSource { + + s.v.Query = query.QueryCaster() + + return s +} + +// Definitions of search-time runtime fields that can be used by the transform. +// For search runtime fields all data +// nodes, including remote nodes, must be 7.12 or later. +func (s *_transformSource) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *_transformSource { + + s.v.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return s +} + +func (s *_transformSource) TransformSourceCaster() *types.TransformSource { + return s.v +} diff --git a/typedapi/esdsl/translog.go b/typedapi/esdsl/translog.go new file mode 100644 index 0000000000..f4d44443cb --- /dev/null +++ b/typedapi/esdsl/translog.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability" +) + +type _translog struct { + v *types.Translog +} + +func NewTranslog() *_translog { + + return &_translog{v: types.NewTranslog()} + +} + +// Whether or not to `fsync` and commit the translog after every index, delete, +// update, or bulk request. +func (s *_translog) Durability(durability translogdurability.TranslogDurability) *_translog { + + s.v.Durability = &durability + return s +} + +// The translog stores all operations that are not yet safely persisted in +// Lucene (i.e., are not +// part of a Lucene commit point). Although these operations are available for +// reads, they will need +// to be replayed if the shard was stopped and had to be recovered. This setting +// controls the +// maximum total size of these operations, to prevent recoveries from taking too +// long. Once the +// maximum size has been reached a flush will happen, generating a new Lucene +// commit point. +func (s *_translog) FlushThresholdSize(bytesize types.ByteSizeVariant) *_translog { + + s.v.FlushThresholdSize = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_translog) Retention(retention types.TranslogRetentionVariant) *_translog { + + s.v.Retention = retention.TranslogRetentionCaster() + + return s +} + +// How often the translog is fsynced to disk and committed, regardless of write +// operations. +// Values less than 100ms are not allowed. 
+func (s *_translog) SyncInterval(duration types.DurationVariant) *_translog { + + s.v.SyncInterval = *duration.DurationCaster() + + return s +} + +func (s *_translog) TranslogCaster() *types.Translog { + return s.v +} diff --git a/typedapi/esdsl/translogretention.go b/typedapi/esdsl/translogretention.go new file mode 100644 index 0000000000..0f62dce079 --- /dev/null +++ b/typedapi/esdsl/translogretention.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _translogRetention struct { + v *types.TranslogRetention +} + +func NewTranslogRetention() *_translogRetention { + + return &_translogRetention{v: types.NewTranslogRetention()} + +} + +// This controls the maximum duration for which translog files are kept by each +// shard. Keeping more +// translog files increases the chance of performing an operation based sync +// when recovering replicas. 
If +// the translog files are not sufficient, replica recovery will fall back to a +// file based sync. This setting +// is ignored, and should not be set, if soft deletes are enabled. Soft deletes +// are enabled by default in +// indices created in Elasticsearch versions 7.0.0 and later. +func (s *_translogRetention) Age(duration types.DurationVariant) *_translogRetention { + + s.v.Age = *duration.DurationCaster() + + return s +} + +// This controls the total size of translog files to keep for each shard. +// Keeping more translog files increases +// the chance of performing an operation based sync when recovering a replica. +// If the translog files are not +// sufficient, replica recovery will fall back to a file based sync. This +// setting is ignored, and should not be +// set, if soft deletes are enabled. Soft deletes are enabled by default in +// indices created in Elasticsearch +// versions 7.0.0 and later. +func (s *_translogRetention) Size(bytesize types.ByteSizeVariant) *_translogRetention { + + s.v.Size = *bytesize.ByteSizeCaster() + + return s +} + +func (s *_translogRetention) TranslogRetentionCaster() *types.TranslogRetention { + return s.v +} diff --git a/typedapi/esdsl/triggercontainer.go b/typedapi/esdsl/triggercontainer.go new file mode 100644 index 0000000000..33d38bd2b1 --- /dev/null +++ b/typedapi/esdsl/triggercontainer.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _triggerContainer struct { + v *types.TriggerContainer +} + +func NewTriggerContainer() *_triggerContainer { + return &_triggerContainer{v: types.NewTriggerContainer()} +} + +// AdditionalTriggerContainerProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_triggerContainer) AdditionalTriggerContainerProperty(key string, value json.RawMessage) *_triggerContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalTriggerContainerProperty = tmp + return s +} + +func (s *_triggerContainer) Schedule(schedule types.ScheduleContainerVariant) *_triggerContainer { + + s.v.Schedule = schedule.ScheduleContainerCaster() + + return s +} + +func (s *_triggerContainer) TriggerContainerCaster() *types.TriggerContainer { + return s.v +} diff --git a/typedapi/esdsl/trimprocessor.go b/typedapi/esdsl/trimprocessor.go new file mode 100644 index 0000000000..1be008089f --- /dev/null +++ b/typedapi/esdsl/trimprocessor.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trimProcessor struct { + v *types.TrimProcessor +} + +// Trims whitespace from a field. +// If the field is an array of strings, all members of the array will be +// trimmed. +// This only works on leading and trailing whitespace. +func NewTrimProcessor() *_trimProcessor { + + return &_trimProcessor{v: types.NewTrimProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_trimProcessor) Description(description string) *_trimProcessor { + + s.v.Description = &description + + return s +} + +// The string-valued field to trim whitespace from. +func (s *_trimProcessor) Field(field string) *_trimProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_trimProcessor) If(if_ types.ScriptVariant) *_trimProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. 
+func (s *_trimProcessor) IgnoreFailure(ignorefailure bool) *_trimProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_trimProcessor) IgnoreMissing(ignoremissing bool) *_trimProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_trimProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_trimProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_trimProcessor) Tag(tag string) *_trimProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the trimmed value to. +// By default, the field is updated in-place. +func (s *_trimProcessor) TargetField(field string) *_trimProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_trimProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Trim = s.v + + return container +} + +func (s *_trimProcessor) TrimProcessorCaster() *types.TrimProcessor { + return s.v +} diff --git a/typedapi/esdsl/trimtokenfilter.go b/typedapi/esdsl/trimtokenfilter.go new file mode 100644 index 0000000000..9b93b25c76 --- /dev/null +++ b/typedapi/esdsl/trimtokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _trimTokenFilter struct { + v *types.TrimTokenFilter +} + +func NewTrimTokenFilter() *_trimTokenFilter { + + return &_trimTokenFilter{v: types.NewTrimTokenFilter()} + +} + +func (s *_trimTokenFilter) Version(versionstring string) *_trimTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_trimTokenFilter) TrimTokenFilterCaster() *types.TrimTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/truncatetokenfilter.go b/typedapi/esdsl/truncatetokenfilter.go new file mode 100644 index 0000000000..66e4ef8248 --- /dev/null +++ b/typedapi/esdsl/truncatetokenfilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _truncateTokenFilter struct { + v *types.TruncateTokenFilter +} + +func NewTruncateTokenFilter() *_truncateTokenFilter { + + return &_truncateTokenFilter{v: types.NewTruncateTokenFilter()} + +} + +func (s *_truncateTokenFilter) Length(length int) *_truncateTokenFilter { + + s.v.Length = &length + + return s +} + +func (s *_truncateTokenFilter) Version(versionstring string) *_truncateTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_truncateTokenFilter) TruncateTokenFilterCaster() *types.TruncateTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/ttestaggregation.go b/typedapi/esdsl/ttestaggregation.go new file mode 100644 index 0000000000..59d6ee9a1f --- /dev/null +++ b/typedapi/esdsl/ttestaggregation.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype" +) + +type _tTestAggregation struct { + v *types.TTestAggregation +} + +// A metrics aggregation that performs a statistical hypothesis test in which +// the test statistic follows a Student’s t-distribution under the null +// hypothesis on numeric values extracted from the aggregated documents. +func NewTTestAggregation() *_tTestAggregation { + + return &_tTestAggregation{v: types.NewTTestAggregation()} + +} + +// Test population A. +func (s *_tTestAggregation) A(a types.TestPopulationVariant) *_tTestAggregation { + + s.v.A = a.TestPopulationCaster() + + return s +} + +// Test population B. +func (s *_tTestAggregation) B(b types.TestPopulationVariant) *_tTestAggregation { + + s.v.B = b.TestPopulationCaster() + + return s +} + +// The type of test. 
+func (s *_tTestAggregation) Type(type_ ttesttype.TTestType) *_tTestAggregation { + + s.v.Type = &type_ + return s +} + +func (s *_tTestAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.TTest = s.v + + return container +} + +func (s *_tTestAggregation) TTestAggregationCaster() *types.TTestAggregation { + return s.v +} diff --git a/typedapi/esdsl/turkishanalyzer.go b/typedapi/esdsl/turkishanalyzer.go new file mode 100644 index 0000000000..54c257bdbd --- /dev/null +++ b/typedapi/esdsl/turkishanalyzer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _turkishAnalyzer struct { + v *types.TurkishAnalyzer +} + +func NewTurkishAnalyzer() *_turkishAnalyzer { + + return &_turkishAnalyzer{v: types.NewTurkishAnalyzer()} + +} + +func (s *_turkishAnalyzer) StemExclusion(stemexclusions ...string) *_turkishAnalyzer { + + for _, v := range stemexclusions { + + s.v.StemExclusion = append(s.v.StemExclusion, v) + + } + return s +} + +func (s *_turkishAnalyzer) Stopwords(stopwords ...string) *_turkishAnalyzer { + + s.v.Stopwords = stopwords + + return s +} + +func (s *_turkishAnalyzer) StopwordsPath(stopwordspath string) *_turkishAnalyzer { + + s.v.StopwordsPath = &stopwordspath + + return s +} + +func (s *_turkishAnalyzer) TurkishAnalyzerCaster() *types.TurkishAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/typemapping.go b/typedapi/esdsl/typemapping.go new file mode 100644 index 0000000000..8bbd0f4ea5 --- /dev/null +++ b/typedapi/esdsl/typemapping.go @@ -0,0 +1,189 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" +) + +type _typeMapping struct { + v *types.TypeMapping +} + +func NewTypeMapping() *_typeMapping { + + return &_typeMapping{v: types.NewTypeMapping()} + +} + +func (s *_typeMapping) AllField(allfield types.AllFieldVariant) *_typeMapping { + + s.v.AllField = allfield.AllFieldCaster() + + return s +} + +func (s *_typeMapping) DataStreamTimestamp_(datastreamtimestamp_ types.DataStreamTimestampVariant) *_typeMapping { + + s.v.DataStreamTimestamp_ = datastreamtimestamp_.DataStreamTimestampCaster() + + return s +} + +func (s *_typeMapping) DateDetection(datedetection bool) *_typeMapping { + + s.v.DateDetection = &datedetection + + return s +} + +func (s *_typeMapping) Dynamic(dynamic dynamicmapping.DynamicMapping) *_typeMapping { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_typeMapping) DynamicDateFormats(dynamicdateformats ...string) *_typeMapping { + + for _, v := range dynamicdateformats { + + s.v.DynamicDateFormats = append(s.v.DynamicDateFormats, v) + + } + return s +} + +func (s *_typeMapping) DynamicTemplates(dynamictemplates []map[string]types.DynamicTemplate) *_typeMapping { + + s.v.DynamicTemplates = dynamictemplates + + return s +} + +func (s *_typeMapping) Enabled(enabled bool) *_typeMapping { + + s.v.Enabled = &enabled + + return s +} + +func (s *_typeMapping) FieldNames_(fieldnames_ types.FieldNamesFieldVariant) *_typeMapping { + + s.v.FieldNames_ = fieldnames_.FieldNamesFieldCaster() + + return s +} + +func (s *_typeMapping) IndexField(indexfield types.IndexFieldVariant) *_typeMapping { + + s.v.IndexField = indexfield.IndexFieldCaster() + + return s +} + +func (s *_typeMapping) Meta_(metadata 
types.MetadataVariant) *_typeMapping { + + s.v.Meta_ = *metadata.MetadataCaster() + + return s +} + +func (s *_typeMapping) NumericDetection(numericdetection bool) *_typeMapping { + + s.v.NumericDetection = &numericdetection + + return s +} + +func (s *_typeMapping) Properties(properties map[string]types.Property) *_typeMapping { + + s.v.Properties = properties + return s +} + +func (s *_typeMapping) AddProperty(key string, value types.PropertyVariant) *_typeMapping { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_typeMapping) Routing_(routing_ types.RoutingFieldVariant) *_typeMapping { + + s.v.Routing_ = routing_.RoutingFieldCaster() + + return s +} + +func (s *_typeMapping) Runtime(runtime map[string]types.RuntimeField) *_typeMapping { + + s.v.Runtime = runtime + return s +} + +func (s *_typeMapping) AddRuntime(key string, value types.RuntimeFieldVariant) *_typeMapping { + + var tmp map[string]types.RuntimeField + if s.v.Runtime == nil { + s.v.Runtime = make(map[string]types.RuntimeField) + } else { + tmp = s.v.Runtime + } + + tmp[key] = *value.RuntimeFieldCaster() + + s.v.Runtime = tmp + return s +} + +func (s *_typeMapping) Size_(size_ types.SizeFieldVariant) *_typeMapping { + + s.v.Size_ = size_.SizeFieldCaster() + + return s +} + +func (s *_typeMapping) Source_(source_ types.SourceFieldVariant) *_typeMapping { + + s.v.Source_ = source_.SourceFieldCaster() + + return s +} + +func (s *_typeMapping) Subobjects(subobjects subobjects.Subobjects) *_typeMapping { + + s.v.Subobjects = &subobjects + return s +} + +func (s *_typeMapping) TypeMappingCaster() *types.TypeMapping { + return s.v +} diff --git a/typedapi/esdsl/typequery.go b/typedapi/esdsl/typequery.go new file mode 100644 index 0000000000..98f9e384e3 --- /dev/null +++ b/typedapi/esdsl/typequery.go @@ -0,0 +1,75 
@@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _typeQuery struct { + v *types.TypeQuery +} + +func NewTypeQuery(value string) *_typeQuery { + + tmp := &_typeQuery{v: types.NewTypeQuery()} + + tmp.Value(value) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_typeQuery) Boost(boost float32) *_typeQuery { + + s.v.Boost = &boost + + return s +} + +func (s *_typeQuery) QueryName_(queryname_ string) *_typeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_typeQuery) Value(value string) *_typeQuery { + + s.v.Value = value + + return s +} + +func (s *_typeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Type = s.v + + return container +} + +func (s *_typeQuery) TypeQueryCaster() *types.TypeQuery { + return s.v +} diff --git a/typedapi/esdsl/uaxemailurltokenizer.go b/typedapi/esdsl/uaxemailurltokenizer.go new file mode 100644 index 0000000000..e9b3f451a4 --- /dev/null +++ b/typedapi/esdsl/uaxemailurltokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _uaxEmailUrlTokenizer struct { + v *types.UaxEmailUrlTokenizer +} + +func NewUaxEmailUrlTokenizer() *_uaxEmailUrlTokenizer { + + return &_uaxEmailUrlTokenizer{v: types.NewUaxEmailUrlTokenizer()} + +} + +func (s *_uaxEmailUrlTokenizer) MaxTokenLength(maxtokenlength int) *_uaxEmailUrlTokenizer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +func (s *_uaxEmailUrlTokenizer) Version(versionstring string) *_uaxEmailUrlTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_uaxEmailUrlTokenizer) UaxEmailUrlTokenizerCaster() *types.UaxEmailUrlTokenizer { + return s.v +} diff --git a/typedapi/esdsl/uniquetokenfilter.go b/typedapi/esdsl/uniquetokenfilter.go new file mode 100644 index 0000000000..7404887c53 --- /dev/null +++ b/typedapi/esdsl/uniquetokenfilter.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _uniqueTokenFilter struct { + v *types.UniqueTokenFilter +} + +func NewUniqueTokenFilter() *_uniqueTokenFilter { + + return &_uniqueTokenFilter{v: types.NewUniqueTokenFilter()} + +} + +func (s *_uniqueTokenFilter) OnlyOnSamePosition(onlyonsameposition bool) *_uniqueTokenFilter { + + s.v.OnlyOnSamePosition = &onlyonsameposition + + return s +} + +func (s *_uniqueTokenFilter) Version(versionstring string) *_uniqueTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_uniqueTokenFilter) UniqueTokenFilterCaster() *types.UniqueTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/unsignedlongnumberproperty.go b/typedapi/esdsl/unsignedlongnumberproperty.go new file mode 100644 index 0000000000..98a0103fd3 --- /dev/null +++ b/typedapi/esdsl/unsignedlongnumberproperty.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" +) + +type _unsignedLongNumberProperty struct { + v *types.UnsignedLongNumberProperty +} + +func NewUnsignedLongNumberProperty() *_unsignedLongNumberProperty { + + return &_unsignedLongNumberProperty{v: types.NewUnsignedLongNumberProperty()} + +} + +func (s *_unsignedLongNumberProperty) Boost(boost types.Float64) *_unsignedLongNumberProperty { + + s.v.Boost = &boost + + return s +} + +func (s *_unsignedLongNumberProperty) Coerce(coerce bool) *_unsignedLongNumberProperty { + + s.v.Coerce = &coerce + + return s +} + +func (s *_unsignedLongNumberProperty) CopyTo(fields ...string) *_unsignedLongNumberProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_unsignedLongNumberProperty) DocValues(docvalues bool) *_unsignedLongNumberProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_unsignedLongNumberProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_unsignedLongNumberProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_unsignedLongNumberProperty) Fields(fields map[string]types.Property) *_unsignedLongNumberProperty { + + s.v.Fields = fields + return s +} + +func (s *_unsignedLongNumberProperty) AddField(key string, value types.PropertyVariant) *_unsignedLongNumberProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s 
*_unsignedLongNumberProperty) IgnoreAbove(ignoreabove int) *_unsignedLongNumberProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +func (s *_unsignedLongNumberProperty) IgnoreMalformed(ignoremalformed bool) *_unsignedLongNumberProperty { + + s.v.IgnoreMalformed = &ignoremalformed + + return s +} + +func (s *_unsignedLongNumberProperty) Index(index bool) *_unsignedLongNumberProperty { + + s.v.Index = &index + + return s +} + +// Metadata about the field. +func (s *_unsignedLongNumberProperty) Meta(meta map[string]string) *_unsignedLongNumberProperty { + + s.v.Meta = meta + return s +} + +func (s *_unsignedLongNumberProperty) AddMeta(key string, value string) *_unsignedLongNumberProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_unsignedLongNumberProperty) NullValue(nullvalue uint64) *_unsignedLongNumberProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_unsignedLongNumberProperty) OnScriptError(onscripterror onscripterror.OnScriptError) *_unsignedLongNumberProperty { + + s.v.OnScriptError = &onscripterror + return s +} + +func (s *_unsignedLongNumberProperty) Properties(properties map[string]types.Property) *_unsignedLongNumberProperty { + + s.v.Properties = properties + return s +} + +func (s *_unsignedLongNumberProperty) AddProperty(key string, value types.PropertyVariant) *_unsignedLongNumberProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_unsignedLongNumberProperty) Script(script types.ScriptVariant) *_unsignedLongNumberProperty { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_unsignedLongNumberProperty) Store(store bool) *_unsignedLongNumberProperty { + + 
s.v.Store = &store + + return s +} + +func (s *_unsignedLongNumberProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_unsignedLongNumberProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_unsignedLongNumberProperty) TimeSeriesDimension(timeseriesdimension bool) *_unsignedLongNumberProperty { + + s.v.TimeSeriesDimension = ×eriesdimension + + return s +} + +// For internal use by Elastic only. Marks the field as a time series dimension. +// Defaults to false. +func (s *_unsignedLongNumberProperty) TimeSeriesMetric(timeseriesmetric timeseriesmetrictype.TimeSeriesMetricType) *_unsignedLongNumberProperty { + + s.v.TimeSeriesMetric = ×eriesmetric + return s +} + +func (s *_unsignedLongNumberProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_unsignedLongNumberProperty) UnsignedLongNumberPropertyCaster() *types.UnsignedLongNumberProperty { + return s.v +} diff --git a/typedapi/esdsl/untypeddecayfunction.go b/typedapi/esdsl/untypeddecayfunction.go new file mode 100644 index 0000000000..a8780e222d --- /dev/null +++ b/typedapi/esdsl/untypeddecayfunction.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" +) + +type _untypedDecayFunction struct { + v *types.UntypedDecayFunction +} + +// Function that scores a document with a linear decay, depending on the +// distance of a numeric field value of the document from an origin. +func NewUntypedDecayFunction() *_untypedDecayFunction { + + return &_untypedDecayFunction{v: types.NewUntypedDecayFunction()} + +} + +func (s *_untypedDecayFunction) DecayFunctionBase(decayfunctionbase map[string]types.DecayPlacement) *_untypedDecayFunction { + + s.v.DecayFunctionBase = decayfunctionbase + return s +} + +func (s *_untypedDecayFunction) AddDecayFunctionBase(key string, value types.DecayPlacementVariant) *_untypedDecayFunction { + + var tmp map[string]types.DecayPlacement + if s.v.DecayFunctionBase == nil { + s.v.DecayFunctionBase = make(map[string]types.DecayPlacement) + } else { + tmp = s.v.DecayFunctionBase + } + + tmp[key] = *value.DecayPlacementCaster() + + s.v.DecayFunctionBase = tmp + return s +} + +// Determines how the distance is calculated when a field used for computing the +// decay contains multiple values. 
+func (s *_untypedDecayFunction) MultiValueMode(multivaluemode multivaluemode.MultiValueMode) *_untypedDecayFunction { + + s.v.MultiValueMode = &multivaluemode + return s +} + +func (s *_untypedDecayFunction) FunctionScoreCaster() *types.FunctionScore { + container := types.NewFunctionScore() + + container.Linear = s.v + + return container +} + +func (s *_untypedDecayFunction) UntypedDecayFunctionCaster() *types.UntypedDecayFunction { + return s.v +} diff --git a/typedapi/esdsl/untypeddistancefeaturequery.go b/typedapi/esdsl/untypeddistancefeaturequery.go new file mode 100644 index 0000000000..de1f4ce992 --- /dev/null +++ b/typedapi/esdsl/untypeddistancefeaturequery.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _untypedDistanceFeatureQuery struct { + v *types.UntypedDistanceFeatureQuery +} + +// Boosts the relevance score of documents closer to a provided origin date or +// point. 
+// For example, you can use this query to give more weight to documents closer +// to a certain date or location. +func NewUntypedDistanceFeatureQuery(origin json.RawMessage, pivot json.RawMessage) *_untypedDistanceFeatureQuery { + + tmp := &_untypedDistanceFeatureQuery{v: types.NewUntypedDistanceFeatureQuery()} + + tmp.Origin(origin) + + tmp.Pivot(pivot) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_untypedDistanceFeatureQuery) Boost(boost float32) *_untypedDistanceFeatureQuery { + + s.v.Boost = &boost + + return s +} + +// Name of the field used to calculate distances. This field must meet the +// following criteria: +// be a `date`, `date_nanos` or `geo_point` field; +// have an `index` mapping parameter value of `true`, which is the default; +// have an `doc_values` mapping parameter value of `true`, which is the default. +func (s *_untypedDistanceFeatureQuery) Field(field string) *_untypedDistanceFeatureQuery { + + s.v.Field = field + + return s +} + +// Date or point of origin used to calculate distances. +// If the `field` value is a `date` or `date_nanos` field, the `origin` value +// must be a date. +// Date Math, such as `now-1h`, is supported. +// If the field value is a `geo_point` field, the `origin` value must be a +// geopoint. +func (s *_untypedDistanceFeatureQuery) Origin(origin json.RawMessage) *_untypedDistanceFeatureQuery { + + s.v.Origin = origin + + return s +} + +// Distance from the `origin` at which relevance scores receive half of the +// `boost` value. +// If the `field` value is a `date` or `date_nanos` field, the `pivot` value +// must be a time unit, such as `1h` or `10d`. 
If the `field` value is a +// `geo_point` field, the `pivot` value must be a distance unit, such as `1km` +// or `12m`. +func (s *_untypedDistanceFeatureQuery) Pivot(pivot json.RawMessage) *_untypedDistanceFeatureQuery { + + s.v.Pivot = pivot + + return s +} + +func (s *_untypedDistanceFeatureQuery) QueryName_(queryname_ string) *_untypedDistanceFeatureQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_untypedDistanceFeatureQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.DistanceFeature = s.v + + return container +} + +func (s *_untypedDistanceFeatureQuery) UntypedDistanceFeatureQueryCaster() *types.UntypedDistanceFeatureQuery { + return s.v +} diff --git a/typedapi/esdsl/untypedrangequery.go b/typedapi/esdsl/untypedrangequery.go new file mode 100644 index 0000000000..816a2d65f1 --- /dev/null +++ b/typedapi/esdsl/untypedrangequery.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" +) + +type _untypedRangeQuery struct { + k string + v *types.UntypedRangeQuery +} + +// Returns documents that contain terms within a provided range. +func NewUntypedRangeQuery(key string) *_untypedRangeQuery { + return &_untypedRangeQuery{ + k: key, + v: types.NewUntypedRangeQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_untypedRangeQuery) Boost(boost float32) *_untypedRangeQuery { + + s.v.Boost = &boost + + return s +} + +// Date format used to convert `date` values in the query. +func (s *_untypedRangeQuery) Format(dateformat string) *_untypedRangeQuery { + + s.v.Format = &dateformat + + return s +} + +func (s *_untypedRangeQuery) From(from json.RawMessage) *_untypedRangeQuery { + + s.v.From = &from + + return s +} + +// Greater than. +func (s *_untypedRangeQuery) Gt(gt json.RawMessage) *_untypedRangeQuery { + + s.v.Gt = gt + + return s +} + +// Greater than or equal to. +func (s *_untypedRangeQuery) Gte(gte json.RawMessage) *_untypedRangeQuery { + + s.v.Gte = gte + + return s +} + +// Less than. +func (s *_untypedRangeQuery) Lt(lt json.RawMessage) *_untypedRangeQuery { + + s.v.Lt = lt + + return s +} + +// Less than or equal to. 
+func (s *_untypedRangeQuery) Lte(lte json.RawMessage) *_untypedRangeQuery { + + s.v.Lte = lte + + return s +} + +func (s *_untypedRangeQuery) QueryName_(queryname_ string) *_untypedRangeQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Indicates how the range query matches values for `range` fields. +func (s *_untypedRangeQuery) Relation(relation rangerelation.RangeRelation) *_untypedRangeQuery { + + s.v.Relation = &relation + return s +} + +// Coordinated Universal Time (UTC) offset or IANA time zone used to convert +// `date` values in the query to UTC. +func (s *_untypedRangeQuery) TimeZone(timezone string) *_untypedRangeQuery { + + s.v.TimeZone = &timezone + + return s +} + +func (s *_untypedRangeQuery) To(to json.RawMessage) *_untypedRangeQuery { + + s.v.To = &to + + return s +} + +func (s *_untypedRangeQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_untypedRangeQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_untypedRangeQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +func (s *_untypedRangeQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Range = map[string]types.RangeQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleUntypedRangeQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleUntypedRangeQuery() *_untypedRangeQuery { + return &_untypedRangeQuery{ + k: "", + v: types.NewUntypedRangeQuery(), + } +} + +func (s *_untypedRangeQuery) UntypedRangeQueryCaster() *types.UntypedRangeQuery { + return s.v.UntypedRangeQueryCaster() +} diff --git a/typedapi/esdsl/updateaction.go b/typedapi/esdsl/updateaction.go new file mode 100644 index 0000000000..836cc07fdd --- /dev/null +++ b/typedapi/esdsl/updateaction.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _updateAction struct { + v *types.UpdateAction +} + +func NewUpdateAction() *_updateAction { + + return &_updateAction{v: types.NewUpdateAction()} + +} + +// If true, the `result` in the response is set to 'noop' when no changes to the +// document occur. +func (s *_updateAction) DetectNoop(detectnoop bool) *_updateAction { + + s.v.DetectNoop = &detectnoop + + return s +} + +// A partial update to an existing document. 
+func (s *_updateAction) Doc(doc json.RawMessage) *_updateAction { + + s.v.Doc = doc + + return s +} + +// Set to `true` to use the contents of `doc` as the value of `upsert`. +func (s *_updateAction) DocAsUpsert(docasupsert bool) *_updateAction { + + s.v.DocAsUpsert = &docasupsert + + return s +} + +// The script to run to update the document. +func (s *_updateAction) Script(script types.ScriptVariant) *_updateAction { + + s.v.Script = script.ScriptCaster() + + return s +} + +// Set to `true` to run the script whether or not the document exists. +func (s *_updateAction) ScriptedUpsert(scriptedupsert bool) *_updateAction { + + s.v.ScriptedUpsert = &scriptedupsert + + return s +} + +// If `false`, source retrieval is turned off. +// You can also specify a comma-separated list of the fields you want to +// retrieve. +func (s *_updateAction) Source_(sourceconfig types.SourceConfigVariant) *_updateAction { + + s.v.Source_ = *sourceconfig.SourceConfigCaster() + + return s +} + +// If the document does not already exist, the contents of `upsert` are inserted +// as a new document. +// If the document exists, the `script` is run. +func (s *_updateAction) Upsert(upsert json.RawMessage) *_updateAction { + + s.v.Upsert = upsert + + return s +} + +func (s *_updateAction) UpdateActionCaster() *types.UpdateAction { + return s.v +} diff --git a/typedapi/esdsl/updateoperation.go b/typedapi/esdsl/updateoperation.go new file mode 100644 index 0000000000..13b7126eed --- /dev/null +++ b/typedapi/esdsl/updateoperation.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +type _updateOperation struct { + v *types.UpdateOperation +} + +// Perform a partial document update. +// The following line must contain the partial document and update options. +func NewUpdateOperation() *_updateOperation { + + return &_updateOperation{v: types.NewUpdateOperation()} + +} + +// The document ID. +func (s *_updateOperation) Id_(id string) *_updateOperation { + + s.v.Id_ = &id + + return s +} + +func (s *_updateOperation) IfPrimaryTerm(ifprimaryterm int64) *_updateOperation { + + s.v.IfPrimaryTerm = &ifprimaryterm + + return s +} + +func (s *_updateOperation) IfSeqNo(sequencenumber int64) *_updateOperation { + + s.v.IfSeqNo = &sequencenumber + + return s +} + +// The name of the index or index alias to perform the action on. +func (s *_updateOperation) Index_(indexname string) *_updateOperation { + + s.v.Index_ = &indexname + + return s +} + +// If `true`, the request's actions must target an index alias. +func (s *_updateOperation) RequireAlias(requirealias bool) *_updateOperation { + + s.v.RequireAlias = &requirealias + + return s +} + +// The number of times an update should be retried in the case of a version +// conflict. 
+func (s *_updateOperation) RetryOnConflict(retryonconflict int) *_updateOperation { + + s.v.RetryOnConflict = &retryonconflict + + return s +} + +// A custom value used to route operations to a specific shard. +func (s *_updateOperation) Routing(routing string) *_updateOperation { + + s.v.Routing = &routing + + return s +} + +func (s *_updateOperation) Version(versionnumber int64) *_updateOperation { + + s.v.Version = &versionnumber + + return s +} + +func (s *_updateOperation) VersionType(versiontype versiontype.VersionType) *_updateOperation { + + s.v.VersionType = &versiontype + return s +} + +func (s *_updateOperation) OperationContainerCaster() *types.OperationContainer { + container := types.NewOperationContainer() + + container.Update = s.v + + return container +} + +func (s *_updateOperation) UpdateOperationCaster() *types.UpdateOperation { + return s.v +} diff --git a/typedapi/esdsl/uppercaseprocessor.go b/typedapi/esdsl/uppercaseprocessor.go new file mode 100644 index 0000000000..95c9c9ead8 --- /dev/null +++ b/typedapi/esdsl/uppercaseprocessor.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _uppercaseProcessor struct { + v *types.UppercaseProcessor +} + +// Converts a string to its uppercase equivalent. +// If the field is an array of strings, all members of the array will be +// converted. +func NewUppercaseProcessor() *_uppercaseProcessor { + + return &_uppercaseProcessor{v: types.NewUppercaseProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_uppercaseProcessor) Description(description string) *_uppercaseProcessor { + + s.v.Description = &description + + return s +} + +// The field to make uppercase. +func (s *_uppercaseProcessor) Field(field string) *_uppercaseProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_uppercaseProcessor) If(if_ types.ScriptVariant) *_uppercaseProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_uppercaseProcessor) IgnoreFailure(ignorefailure bool) *_uppercaseProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. +func (s *_uppercaseProcessor) IgnoreMissing(ignoremissing bool) *_uppercaseProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_uppercaseProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_uppercaseProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. 
+func (s *_uppercaseProcessor) Tag(tag string) *_uppercaseProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to. +// By default, the field is updated in-place. +func (s *_uppercaseProcessor) TargetField(field string) *_uppercaseProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_uppercaseProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Uppercase = s.v + + return container +} + +func (s *_uppercaseProcessor) UppercaseProcessorCaster() *types.UppercaseProcessor { + return s.v +} diff --git a/typedapi/esdsl/uppercasetokenfilter.go b/typedapi/esdsl/uppercasetokenfilter.go new file mode 100644 index 0000000000..8894aa5876 --- /dev/null +++ b/typedapi/esdsl/uppercasetokenfilter.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _uppercaseTokenFilter struct { + v *types.UppercaseTokenFilter +} + +func NewUppercaseTokenFilter() *_uppercaseTokenFilter { + + return &_uppercaseTokenFilter{v: types.NewUppercaseTokenFilter()} + +} + +func (s *_uppercaseTokenFilter) Version(versionstring string) *_uppercaseTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_uppercaseTokenFilter) UppercaseTokenFilterCaster() *types.UppercaseTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/uripartsprocessor.go b/typedapi/esdsl/uripartsprocessor.go new file mode 100644 index 0000000000..2ed6dfddc5 --- /dev/null +++ b/typedapi/esdsl/uripartsprocessor.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _uriPartsProcessor struct { + v *types.UriPartsProcessor +} + +// Parses a Uniform Resource Identifier (URI) string and extracts its components +// as an object. +// This URI object includes properties for the URI’s domain, path, fragment, +// port, query, scheme, user info, username, and password. +func NewUriPartsProcessor() *_uriPartsProcessor { + + return &_uriPartsProcessor{v: types.NewUriPartsProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_uriPartsProcessor) Description(description string) *_uriPartsProcessor { + + s.v.Description = &description + + return s +} + +// Field containing the URI string. +func (s *_uriPartsProcessor) Field(field string) *_uriPartsProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_uriPartsProcessor) If(if_ types.ScriptVariant) *_uriPartsProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_uriPartsProcessor) IgnoreFailure(ignorefailure bool) *_uriPartsProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_uriPartsProcessor) IgnoreMissing(ignoremissing bool) *_uriPartsProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// If `true`, the processor copies the unparsed URI to +// `.original`. +func (s *_uriPartsProcessor) KeepOriginal(keeporiginal bool) *_uriPartsProcessor { + + s.v.KeepOriginal = &keeporiginal + + return s +} + +// Handle failures for the processor. 
+func (s *_uriPartsProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_uriPartsProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// If `true`, the processor removes the `field` after parsing the URI string. +// If parsing fails, the processor does not remove the `field`. +func (s *_uriPartsProcessor) RemoveIfSuccessful(removeifsuccessful bool) *_uriPartsProcessor { + + s.v.RemoveIfSuccessful = &removeifsuccessful + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_uriPartsProcessor) Tag(tag string) *_uriPartsProcessor { + + s.v.Tag = &tag + + return s +} + +// Output field for the URI object. +func (s *_uriPartsProcessor) TargetField(field string) *_uriPartsProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_uriPartsProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.UriParts = s.v + + return container +} + +func (s *_uriPartsProcessor) UriPartsProcessorCaster() *types.UriPartsProcessor { + return s.v +} diff --git a/typedapi/esdsl/urldecodeprocessor.go b/typedapi/esdsl/urldecodeprocessor.go new file mode 100644 index 0000000000..a8f04beced --- /dev/null +++ b/typedapi/esdsl/urldecodeprocessor.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _urlDecodeProcessor struct { + v *types.UrlDecodeProcessor +} + +// URL-decodes a string. +// If the field is an array of strings, all members of the array will be +// decoded. +func NewUrlDecodeProcessor() *_urlDecodeProcessor { + + return &_urlDecodeProcessor{v: types.NewUrlDecodeProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_urlDecodeProcessor) Description(description string) *_urlDecodeProcessor { + + s.v.Description = &description + + return s +} + +// The field to decode. +func (s *_urlDecodeProcessor) Field(field string) *_urlDecodeProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. +func (s *_urlDecodeProcessor) If(if_ types.ScriptVariant) *_urlDecodeProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_urlDecodeProcessor) IgnoreFailure(ignorefailure bool) *_urlDecodeProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist or is `null`, the processor quietly +// exits without modifying the document. 
+func (s *_urlDecodeProcessor) IgnoreMissing(ignoremissing bool) *_urlDecodeProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_urlDecodeProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_urlDecodeProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_urlDecodeProcessor) Tag(tag string) *_urlDecodeProcessor { + + s.v.Tag = &tag + + return s +} + +// The field to assign the converted value to. +// By default, the field is updated in-place. +func (s *_urlDecodeProcessor) TargetField(field string) *_urlDecodeProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_urlDecodeProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.Urldecode = s.v + + return container +} + +func (s *_urlDecodeProcessor) UrlDecodeProcessorCaster() *types.UrlDecodeProcessor { + return s.v +} diff --git a/typedapi/esdsl/useragentprocessor.go b/typedapi/esdsl/useragentprocessor.go new file mode 100644 index 0000000000..6fcc6d36e1 --- /dev/null +++ b/typedapi/esdsl/useragentprocessor.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty" +) + +type _userAgentProcessor struct { + v *types.UserAgentProcessor +} + +// The `user_agent` processor extracts details from the user agent string a +// browser sends with its web requests. +// This processor adds this information by default under the `user_agent` field. +func NewUserAgentProcessor() *_userAgentProcessor { + + return &_userAgentProcessor{v: types.NewUserAgentProcessor()} + +} + +// Description of the processor. +// Useful for describing the purpose of the processor or its configuration. +func (s *_userAgentProcessor) Description(description string) *_userAgentProcessor { + + s.v.Description = &description + + return s +} + +// Extracts device type from the user agent string on a best-effort basis. +func (s *_userAgentProcessor) ExtractDeviceType(extractdevicetype bool) *_userAgentProcessor { + + s.v.ExtractDeviceType = &extractdevicetype + + return s +} + +// The field containing the user agent string. +func (s *_userAgentProcessor) Field(field string) *_userAgentProcessor { + + s.v.Field = field + + return s +} + +// Conditionally execute the processor. 
+func (s *_userAgentProcessor) If(if_ types.ScriptVariant) *_userAgentProcessor { + + s.v.If = if_.ScriptCaster() + + return s +} + +// Ignore failures for the processor. +func (s *_userAgentProcessor) IgnoreFailure(ignorefailure bool) *_userAgentProcessor { + + s.v.IgnoreFailure = &ignorefailure + + return s +} + +// If `true` and `field` does not exist, the processor quietly exits without +// modifying the document. +func (s *_userAgentProcessor) IgnoreMissing(ignoremissing bool) *_userAgentProcessor { + + s.v.IgnoreMissing = &ignoremissing + + return s +} + +// Handle failures for the processor. +func (s *_userAgentProcessor) OnFailure(onfailures ...types.ProcessorContainerVariant) *_userAgentProcessor { + + for _, v := range onfailures { + + s.v.OnFailure = append(s.v.OnFailure, *v.ProcessorContainerCaster()) + + } + return s +} + +// Controls what properties are added to `target_field`. +func (s *_userAgentProcessor) Properties(properties ...useragentproperty.UserAgentProperty) *_userAgentProcessor { + + for _, v := range properties { + + s.v.Properties = append(s.v.Properties, v) + + } + return s +} + +// The name of the file in the `config/ingest-user-agent` directory containing +// the regular expressions for parsing the user agent string. Both the directory +// and the file have to be created before starting Elasticsearch. If not +// specified, ingest-user-agent will use the `regexes.yaml` from uap-core it +// ships with. +func (s *_userAgentProcessor) RegexFile(regexfile string) *_userAgentProcessor { + + s.v.RegexFile = ®exfile + + return s +} + +// Identifier for the processor. +// Useful for debugging and metrics. +func (s *_userAgentProcessor) Tag(tag string) *_userAgentProcessor { + + s.v.Tag = &tag + + return s +} + +// The field that will be filled with the user agent details. 
+func (s *_userAgentProcessor) TargetField(field string) *_userAgentProcessor { + + s.v.TargetField = &field + + return s +} + +func (s *_userAgentProcessor) ProcessorContainerCaster() *types.ProcessorContainer { + container := types.NewProcessorContainer() + + container.UserAgent = s.v + + return container +} + +func (s *_userAgentProcessor) UserAgentProcessorCaster() *types.UserAgentProcessor { + return s.v +} diff --git a/typedapi/esdsl/userquerycontainer.go b/typedapi/esdsl/userquerycontainer.go new file mode 100644 index 0000000000..7d933e69bc --- /dev/null +++ b/typedapi/esdsl/userquerycontainer.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _userQueryContainer struct { + v *types.UserQueryContainer +} + +func NewUserQueryContainer() *_userQueryContainer { + return &_userQueryContainer{v: types.NewUserQueryContainer()} +} + +// AdditionalUserQueryContainerProperty is a single key dictionnary. 
+// It will replace the current value on each call. +func (s *_userQueryContainer) AdditionalUserQueryContainerProperty(key string, value json.RawMessage) *_userQueryContainer { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalUserQueryContainerProperty = tmp + return s +} + +// matches users matching boolean combinations of other queries. +func (s *_userQueryContainer) Bool(bool types.BoolQueryVariant) *_userQueryContainer { + + s.v.Bool = bool.BoolQueryCaster() + + return s +} + +// Returns users that contain an indexed value for a field. +func (s *_userQueryContainer) Exists(exists types.ExistsQueryVariant) *_userQueryContainer { + + s.v.Exists = exists.ExistsQueryCaster() + + return s +} + +// Returns users based on their IDs. +// This query uses the user document IDs stored in the `_id` field. +func (s *_userQueryContainer) Ids(ids types.IdsQueryVariant) *_userQueryContainer { + + s.v.Ids = ids.IdsQueryCaster() + + return s +} + +// Returns users that match a provided text, number, date or boolean value. +// The provided text is analyzed before matching. +// Match is a single key dictionnary. +// It will replace the current value on each call. +func (s *_userQueryContainer) Match(key string, value types.MatchQueryVariant) *_userQueryContainer { + + tmp := make(map[string]types.MatchQuery) + + tmp[key] = *value.MatchQueryCaster() + + s.v.Match = tmp + return s +} + +// Matches all users, giving them all a `_score` of 1.0. +func (s *_userQueryContainer) MatchAll(matchall types.MatchAllQueryVariant) *_userQueryContainer { + + s.v.MatchAll = matchall.MatchAllQueryCaster() + + return s +} + +// Returns users that contain a specific prefix in a provided field. +// Prefix is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_userQueryContainer) Prefix(key string, value types.PrefixQueryVariant) *_userQueryContainer { + + tmp := make(map[string]types.PrefixQuery) + + tmp[key] = *value.PrefixQueryCaster() + + s.v.Prefix = tmp + return s +} + +// Returns users that contain terms within a provided range. +// Range is a single key dictionnary. +// It will replace the current value on each call. +func (s *_userQueryContainer) Range(key string, value types.RangeQueryVariant) *_userQueryContainer { + + tmp := make(map[string]types.RangeQuery) + + tmp[key] = *value.RangeQueryCaster() + + s.v.Range = tmp + return s +} + +// Returns users based on a provided query string, using a parser with a limited +// but fault-tolerant syntax. +func (s *_userQueryContainer) SimpleQueryString(simplequerystring types.SimpleQueryStringQueryVariant) *_userQueryContainer { + + s.v.SimpleQueryString = simplequerystring.SimpleQueryStringQueryCaster() + + return s +} + +// Returns users that contain an exact term in a provided field. +// To return a document, the query term must exactly match the queried field's +// value, including whitespace and capitalization. +// Term is a single key dictionnary. +// It will replace the current value on each call. +func (s *_userQueryContainer) Term(key string, value types.TermQueryVariant) *_userQueryContainer { + + tmp := make(map[string]types.TermQuery) + + tmp[key] = *value.TermQueryCaster() + + s.v.Term = tmp + return s +} + +// Returns users that contain one or more exact terms in a provided field. +// To return a document, one or more terms must exactly match a field value, +// including whitespace and capitalization. +func (s *_userQueryContainer) Terms(terms types.TermsQueryVariant) *_userQueryContainer { + + s.v.Terms = terms.TermsQueryCaster() + + return s +} + +// Returns users that contain terms matching a wildcard pattern. +// Wildcard is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_userQueryContainer) Wildcard(key string, value types.WildcardQueryVariant) *_userQueryContainer { + + tmp := make(map[string]types.WildcardQuery) + + tmp[key] = *value.WildcardQueryCaster() + + s.v.Wildcard = tmp + return s +} + +func (s *_userQueryContainer) UserQueryContainerCaster() *types.UserQueryContainer { + return s.v +} diff --git a/typedapi/esdsl/validation.go b/typedapi/esdsl/validation.go new file mode 100644 index 0000000000..08f54e0061 --- /dev/null +++ b/typedapi/esdsl/validation.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide all the types that are part of the union. 
+type _validation struct { + v types.Validation +} + +func NewValidation() *_validation { + return &_validation{v: nil} +} + +func (u *_validation) LessThanValidation(lessthanvalidation types.LessThanValidationVariant) *_validation { + + u.v = &lessthanvalidation + + return u +} + +// Interface implementation for LessThanValidation in Validation union +func (u *_lessThanValidation) ValidationCaster() *types.Validation { + t := types.Validation(u.v) + return &t +} + +func (u *_validation) GreaterThanValidation(greaterthanvalidation types.GreaterThanValidationVariant) *_validation { + + u.v = &greaterthanvalidation + + return u +} + +// Interface implementation for GreaterThanValidation in Validation union +func (u *_greaterThanValidation) ValidationCaster() *types.Validation { + t := types.Validation(u.v) + return &t +} + +func (u *_validation) ListTypeValidation(listtypevalidation types.ListTypeValidationVariant) *_validation { + + u.v = &listtypevalidation + + return u +} + +// Interface implementation for ListTypeValidation in Validation union +func (u *_listTypeValidation) ValidationCaster() *types.Validation { + t := types.Validation(u.v) + return &t +} + +func (u *_validation) IncludedInValidation(includedinvalidation types.IncludedInValidationVariant) *_validation { + + u.v = &includedinvalidation + + return u +} + +// Interface implementation for IncludedInValidation in Validation union +func (u *_includedInValidation) ValidationCaster() *types.Validation { + t := types.Validation(u.v) + return &t +} + +func (u *_validation) RegexValidation(regexvalidation types.RegexValidationVariant) *_validation { + + u.v = ®exvalidation + + return u +} + +// Interface implementation for RegexValidation in Validation union +func (u *_regexValidation) ValidationCaster() *types.Validation { + t := types.Validation(u.v) + return &t +} + +func (u *_validation) ValidationCaster() *types.Validation { + return &u.v +} diff --git a/typedapi/esdsl/valuecountaggregation.go 
b/typedapi/esdsl/valuecountaggregation.go new file mode 100644 index 0000000000..e7778ab618 --- /dev/null +++ b/typedapi/esdsl/valuecountaggregation.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _valueCountAggregation struct { + v *types.ValueCountAggregation +} + +// A single-value metrics aggregation that counts the number of values that are +// extracted from the aggregated documents. +func NewValueCountAggregation() *_valueCountAggregation { + + return &_valueCountAggregation{v: types.NewValueCountAggregation()} + +} + +// The field on which to run the aggregation. +func (s *_valueCountAggregation) Field(field string) *_valueCountAggregation { + + s.v.Field = &field + + return s +} + +func (s *_valueCountAggregation) Format(format string) *_valueCountAggregation { + + s.v.Format = &format + + return s +} + +// The value to apply to documents that do not have a value. +// By default, documents without a value are ignored. 
+func (s *_valueCountAggregation) Missing(missing types.MissingVariant) *_valueCountAggregation { + + s.v.Missing = *missing.MissingCaster() + + return s +} + +func (s *_valueCountAggregation) Script(script types.ScriptVariant) *_valueCountAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_valueCountAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.ValueCount = s.v + + return container +} + +func (s *_valueCountAggregation) ApiKeyAggregationContainerCaster() *types.ApiKeyAggregationContainer { + container := types.NewApiKeyAggregationContainer() + + container.ValueCount = s.v + + return container +} + +func (s *_valueCountAggregation) ValueCountAggregationCaster() *types.ValueCountAggregation { + return s.v +} diff --git a/typedapi/esdsl/variablewidthhistogramaggregation.go b/typedapi/esdsl/variablewidthhistogramaggregation.go new file mode 100644 index 0000000000..4b30f31915 --- /dev/null +++ b/typedapi/esdsl/variablewidthhistogramaggregation.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _variableWidthHistogramAggregation struct { + v *types.VariableWidthHistogramAggregation +} + +// A multi-bucket aggregation similar to the histogram, except instead of +// providing an interval to use as the width of each bucket, a target number of +// buckets is provided. +func NewVariableWidthHistogramAggregation() *_variableWidthHistogramAggregation { + + return &_variableWidthHistogramAggregation{v: types.NewVariableWidthHistogramAggregation()} + +} + +// The target number of buckets. +func (s *_variableWidthHistogramAggregation) Buckets(buckets int) *_variableWidthHistogramAggregation { + + s.v.Buckets = &buckets + + return s +} + +// The name of the field. +func (s *_variableWidthHistogramAggregation) Field(field string) *_variableWidthHistogramAggregation { + + s.v.Field = &field + + return s +} + +// Specifies the number of individual documents that will be stored in memory on +// a shard before the initial bucketing algorithm is run. +// Defaults to `min(10 * shard_size, 50000)`. +func (s *_variableWidthHistogramAggregation) InitialBuffer(initialbuffer int) *_variableWidthHistogramAggregation { + + s.v.InitialBuffer = &initialbuffer + + return s +} + +func (s *_variableWidthHistogramAggregation) Script(script types.ScriptVariant) *_variableWidthHistogramAggregation { + + s.v.Script = script.ScriptCaster() + + return s +} + +// The number of buckets that the coordinating node will request from each +// shard. +// Defaults to `buckets * 50`. 
+func (s *_variableWidthHistogramAggregation) ShardSize(shardsize int) *_variableWidthHistogramAggregation { + + s.v.ShardSize = &shardsize + + return s +} + +func (s *_variableWidthHistogramAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.VariableWidthHistogram = s.v + + return container +} + +func (s *_variableWidthHistogramAggregation) VariableWidthHistogramAggregationCaster() *types.VariableWidthHistogramAggregation { + return s.v +} diff --git a/typedapi/esdsl/versionproperty.go b/typedapi/esdsl/versionproperty.go new file mode 100644 index 0000000000..ae07803f67 --- /dev/null +++ b/typedapi/esdsl/versionproperty.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _versionProperty struct { + v *types.VersionProperty +} + +func NewVersionProperty() *_versionProperty { + + return &_versionProperty{v: types.NewVersionProperty()} + +} + +func (s *_versionProperty) CopyTo(fields ...string) *_versionProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_versionProperty) DocValues(docvalues bool) *_versionProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_versionProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_versionProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_versionProperty) Fields(fields map[string]types.Property) *_versionProperty { + + s.v.Fields = fields + return s +} + +func (s *_versionProperty) AddField(key string, value types.PropertyVariant) *_versionProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_versionProperty) IgnoreAbove(ignoreabove int) *_versionProperty { + + s.v.IgnoreAbove = &ignoreabove + + return s +} + +// Metadata about the field. 
+func (s *_versionProperty) Meta(meta map[string]string) *_versionProperty { + + s.v.Meta = meta + return s +} + +func (s *_versionProperty) AddMeta(key string, value string) *_versionProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_versionProperty) Properties(properties map[string]types.Property) *_versionProperty { + + s.v.Properties = properties + return s +} + +func (s *_versionProperty) AddProperty(key string, value types.PropertyVariant) *_versionProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_versionProperty) Store(store bool) *_versionProperty { + + s.v.Store = &store + + return s +} + +func (s *_versionProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_versionProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_versionProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_versionProperty) VersionPropertyCaster() *types.VersionProperty { + return s.v +} diff --git a/typedapi/esdsl/vertexdefinition.go b/typedapi/esdsl/vertexdefinition.go new file mode 100644 index 0000000000..2276a66dcf --- /dev/null +++ b/typedapi/esdsl/vertexdefinition.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _vertexDefinition struct { + v *types.VertexDefinition +} + +func NewVertexDefinition() *_vertexDefinition { + + return &_vertexDefinition{v: types.NewVertexDefinition()} + +} + +// Prevents the specified terms from being included in the results. +func (s *_vertexDefinition) Exclude(excludes ...string) *_vertexDefinition { + + for _, v := range excludes { + + s.v.Exclude = append(s.v.Exclude, v) + + } + return s +} + +// Identifies a field in the documents of interest. +func (s *_vertexDefinition) Field(field string) *_vertexDefinition { + + s.v.Field = field + + return s +} + +// Identifies the terms of interest that form the starting points from which you +// want to spider out. +func (s *_vertexDefinition) Include(includes ...types.VertexIncludeVariant) *_vertexDefinition { + + for _, v := range includes { + + s.v.Include = append(s.v.Include, *v.VertexIncludeCaster()) + + } + return s +} + +// Specifies how many documents must contain a pair of terms before it is +// considered to be a useful connection. +// This setting acts as a certainty threshold. 
+func (s *_vertexDefinition) MinDocCount(mindoccount int64) *_vertexDefinition { + + s.v.MinDocCount = &mindoccount + + return s +} + +// Controls how many documents on a particular shard have to contain a pair of +// terms before the connection is returned for global consideration. +func (s *_vertexDefinition) ShardMinDocCount(shardmindoccount int64) *_vertexDefinition { + + s.v.ShardMinDocCount = &shardmindoccount + + return s +} + +// Specifies the maximum number of vertex terms returned for each field. +func (s *_vertexDefinition) Size(size int) *_vertexDefinition { + + s.v.Size = &size + + return s +} + +func (s *_vertexDefinition) VertexDefinitionCaster() *types.VertexDefinition { + return s.v +} diff --git a/typedapi/esdsl/vertexinclude.go b/typedapi/esdsl/vertexinclude.go new file mode 100644 index 0000000000..67c566e54b --- /dev/null +++ b/typedapi/esdsl/vertexinclude.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _vertexInclude struct { + v *types.VertexInclude +} + +func NewVertexInclude(term string) *_vertexInclude { + + tmp := &_vertexInclude{v: types.NewVertexInclude()} + + tmp.Term(term) + + return tmp + +} + +func (s *_vertexInclude) Boost(boost types.Float64) *_vertexInclude { + + s.v.Boost = &boost + + return s +} + +func (s *_vertexInclude) Term(term string) *_vertexInclude { + + s.v.Term = term + + return s +} + +func (s *_vertexInclude) VertexIncludeCaster() *types.VertexInclude { + return s.v +} diff --git a/typedapi/esdsl/vocabulary.go b/typedapi/esdsl/vocabulary.go new file mode 100644 index 0000000000..5ebc0acc17 --- /dev/null +++ b/typedapi/esdsl/vocabulary.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _vocabulary struct { + v *types.Vocabulary +} + +func NewVocabulary() *_vocabulary { + + return &_vocabulary{v: types.NewVocabulary()} + +} + +func (s *_vocabulary) Index(indexname string) *_vocabulary { + + s.v.Index = indexname + + return s +} + +func (s *_vocabulary) VocabularyCaster() *types.Vocabulary { + return s.v +} diff --git a/typedapi/esdsl/waitforsnapshotaction.go b/typedapi/esdsl/waitforsnapshotaction.go new file mode 100644 index 0000000000..2d809783bd --- /dev/null +++ b/typedapi/esdsl/waitforsnapshotaction.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _waitForSnapshotAction struct { + v *types.WaitForSnapshotAction +} + +func NewWaitForSnapshotAction(policy string) *_waitForSnapshotAction { + + tmp := &_waitForSnapshotAction{v: types.NewWaitForSnapshotAction()} + + tmp.Policy(policy) + + return tmp + +} + +func (s *_waitForSnapshotAction) Policy(policy string) *_waitForSnapshotAction { + + s.v.Policy = policy + + return s +} + +func (s *_waitForSnapshotAction) WaitForSnapshotActionCaster() *types.WaitForSnapshotAction { + return s.v +} diff --git a/typedapi/esdsl/watch.go b/typedapi/esdsl/watch.go new file mode 100644 index 0000000000..7cdb90c29c --- /dev/null +++ b/typedapi/esdsl/watch.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _watch struct { + v *types.Watch +} + +func NewWatch(condition types.WatcherConditionVariant, input types.WatcherInputVariant, trigger types.TriggerContainerVariant) *_watch { + + tmp := &_watch{v: types.NewWatch()} + + tmp.Condition(condition) + + tmp.Input(input) + + tmp.Trigger(trigger) + + return tmp + +} + +func (s *_watch) Actions(actions map[string]types.WatcherAction) *_watch { + + s.v.Actions = actions + return s +} + +func (s *_watch) AddAction(key string, value types.WatcherActionVariant) *_watch { + + var tmp map[string]types.WatcherAction + if s.v.Actions == nil { + s.v.Actions = make(map[string]types.WatcherAction) + } else { + tmp = s.v.Actions + } + + tmp[key] = *value.WatcherActionCaster() + + s.v.Actions = tmp + return s +} + +func (s *_watch) Condition(condition types.WatcherConditionVariant) *_watch { + + s.v.Condition = *condition.WatcherConditionCaster() + + return s +} + +func (s *_watch) Input(input types.WatcherInputVariant) *_watch { + + s.v.Input = *input.WatcherInputCaster() + + return s +} + +func (s *_watch) Metadata(metadata types.MetadataVariant) *_watch { + + s.v.Metadata = *metadata.MetadataCaster() + + return s +} + +func (s *_watch) Status(status types.WatchStatusVariant) *_watch { + + s.v.Status = status.WatchStatusCaster() + + return s +} + +func (s *_watch) ThrottlePeriod(duration types.DurationVariant) *_watch { + + s.v.ThrottlePeriod = *duration.DurationCaster() + + return s +} + +func (s *_watch) ThrottlePeriodInMillis(durationvalueunitmillis int64) *_watch { + + s.v.ThrottlePeriodInMillis = &durationvalueunitmillis + + return s +} + +func (s *_watch) Transform(transform types.TransformContainerVariant) *_watch { + + s.v.Transform = transform.TransformContainerCaster() + + return s +} + +func (s *_watch) Trigger(trigger 
types.TriggerContainerVariant) *_watch { + + s.v.Trigger = *trigger.TriggerContainerCaster() + + return s +} + +func (s *_watch) WatchCaster() *types.Watch { + return s.v +} diff --git a/typedapi/esdsl/watcheraction.go b/typedapi/esdsl/watcheraction.go new file mode 100644 index 0000000000..cd8010b62c --- /dev/null +++ b/typedapi/esdsl/watcheraction.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype" +) + +type _watcherAction struct { + v *types.WatcherAction +} + +func NewWatcherAction() *_watcherAction { + + return &_watcherAction{v: types.NewWatcherAction()} + +} + +func (s *_watcherAction) ActionType(actiontype actiontype.ActionType) *_watcherAction { + + s.v.ActionType = &actiontype + return s +} + +func (s *_watcherAction) Condition(condition types.WatcherConditionVariant) *_watcherAction { + + s.v.Condition = condition.WatcherConditionCaster() + + return s +} + +func (s *_watcherAction) Email(email types.EmailActionVariant) *_watcherAction { + + s.v.Email = email.EmailActionCaster() + + return s +} + +func (s *_watcherAction) Foreach(foreach string) *_watcherAction { + + s.v.Foreach = &foreach + + return s +} + +func (s *_watcherAction) Index(index types.IndexActionVariant) *_watcherAction { + + s.v.Index = index.IndexActionCaster() + + return s +} + +func (s *_watcherAction) Logging(logging types.LoggingActionVariant) *_watcherAction { + + s.v.Logging = logging.LoggingActionCaster() + + return s +} + +func (s *_watcherAction) MaxIterations(maxiterations int) *_watcherAction { + + s.v.MaxIterations = &maxiterations + + return s +} + +func (s *_watcherAction) Name(name string) *_watcherAction { + + s.v.Name = &name + + return s +} + +func (s *_watcherAction) Pagerduty(pagerduty types.PagerDutyActionVariant) *_watcherAction { + + s.v.Pagerduty = pagerduty.PagerDutyActionCaster() + + return s +} + +func (s *_watcherAction) Slack(slack types.SlackActionVariant) *_watcherAction { + + s.v.Slack = slack.SlackActionCaster() + + return s +} + +func (s *_watcherAction) ThrottlePeriod(duration types.DurationVariant) *_watcherAction { + + s.v.ThrottlePeriod = *duration.DurationCaster() + + return s +} + 
+func (s *_watcherAction) ThrottlePeriodInMillis(durationvalueunitmillis int64) *_watcherAction { + + s.v.ThrottlePeriodInMillis = &durationvalueunitmillis + + return s +} + +func (s *_watcherAction) Transform(transform types.TransformContainerVariant) *_watcherAction { + + s.v.Transform = transform.TransformContainerCaster() + + return s +} + +func (s *_watcherAction) Webhook(webhook types.WebhookActionVariant) *_watcherAction { + + s.v.Webhook = webhook.WebhookActionCaster() + + return s +} + +func (s *_watcherAction) WatcherActionCaster() *types.WatcherAction { + return s.v +} diff --git a/typedapi/esdsl/watchercondition.go b/typedapi/esdsl/watchercondition.go new file mode 100644 index 0000000000..99a8cdee4f --- /dev/null +++ b/typedapi/esdsl/watchercondition.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _watcherCondition struct { + v *types.WatcherCondition +} + +func NewWatcherCondition() *_watcherCondition { + return &_watcherCondition{v: types.NewWatcherCondition()} +} + +// AdditionalWatcherConditionProperty is a single key dictionnary. +// It will replace the current value on each call. +func (s *_watcherCondition) AdditionalWatcherConditionProperty(key string, value json.RawMessage) *_watcherCondition { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalWatcherConditionProperty = tmp + return s +} + +func (s *_watcherCondition) Always(always types.AlwaysConditionVariant) *_watcherCondition { + + s.v.Always = always.AlwaysConditionCaster() + + return s +} + +// ArrayCompare is a single key dictionnary. +// It will replace the current value on each call. +func (s *_watcherCondition) ArrayCompare(key string, value types.ArrayCompareConditionVariant) *_watcherCondition { + + tmp := make(map[string]types.ArrayCompareCondition) + + tmp[key] = *value.ArrayCompareConditionCaster() + + s.v.ArrayCompare = tmp + return s +} + +// + +func (s *_watcherCondition) Never(never types.NeverConditionVariant) *_watcherCondition { + + s.v.Never = never.NeverConditionCaster() + + return s +} + +func (s *_watcherCondition) Script(script types.ScriptConditionVariant) *_watcherCondition { + + s.v.Script = script.ScriptConditionCaster() + + return s +} + +func (s *_watcherCondition) WatcherConditionCaster() *types.WatcherCondition { + return s.v +} diff --git a/typedapi/esdsl/watcherinput.go b/typedapi/esdsl/watcherinput.go new file mode 100644 index 0000000000..1972ce64d5 --- /dev/null +++ b/typedapi/esdsl/watcherinput.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +type _watcherInput struct { + v *types.WatcherInput +} + +func NewWatcherInput() *_watcherInput { + return &_watcherInput{v: types.NewWatcherInput()} +} + +// AdditionalWatcherInputProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (s *_watcherInput) AdditionalWatcherInputProperty(key string, value json.RawMessage) *_watcherInput { + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + s.v.AdditionalWatcherInputProperty = tmp + return s +} + +func (s *_watcherInput) Chain(chain types.ChainInputVariant) *_watcherInput { + + s.v.Chain = chain.ChainInputCaster() + + return s +} + +func (s *_watcherInput) Http(http types.HttpInputVariant) *_watcherInput { + + s.v.Http = http.HttpInputCaster() + + return s +} + +func (s *_watcherInput) Search(search types.SearchInputVariant) *_watcherInput { + + s.v.Search = search.SearchInputCaster() + + return s +} + +func (s *_watcherInput) Simple(simple map[string]json.RawMessage) *_watcherInput { + + s.v.Simple = simple + return s +} + +func (s *_watcherInput) AddSimple(key string, value json.RawMessage) *_watcherInput { + + var tmp map[string]json.RawMessage + if s.v.Simple == nil { + s.v.Simple = make(map[string]json.RawMessage) + } else { + tmp = s.v.Simple + } + + tmp[key] = value + + s.v.Simple = tmp + return s +} + +func (s *_watcherInput) WatcherInputCaster() *types.WatcherInput { + return s.v +} diff --git a/typedapi/esdsl/watcherstatusactions.go b/typedapi/esdsl/watcherstatusactions.go new file mode 100644 index 0000000000..d7d387d82c --- /dev/null +++ b/typedapi/esdsl/watcherstatusactions.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +// This is provide an API for type alias. +type _watcherStatusActions struct { + v types.WatcherStatusActions +} + +func NewWatcherStatusActions(watcherstatusactions map[string]types.ActionStatusVariant) *_watcherStatusActions { + return &_watcherStatusActions{v: make(map[string]types.ActionStatus, 0)} +} + +func (u *_watcherStatusActions) WatcherStatusActionsCaster() *types.WatcherStatusActions { + return &u.v +} diff --git a/typedapi/esdsl/watchstatus.go b/typedapi/esdsl/watchstatus.go new file mode 100644 index 0000000000..e1cf23cb65 --- /dev/null +++ b/typedapi/esdsl/watchstatus.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _watchStatus struct { + v *types.WatchStatus +} + +func NewWatchStatus(state types.ActivationStateVariant) *_watchStatus { + + tmp := &_watchStatus{v: types.NewWatchStatus()} + + tmp.State(state) + + return tmp + +} + +func (s *_watchStatus) Actions(watcherstatusactions types.WatcherStatusActionsVariant) *_watchStatus { + + s.v.Actions = *watcherstatusactions.WatcherStatusActionsCaster() + + return s +} + +func (s *_watchStatus) ExecutionState(executionstate string) *_watchStatus { + + s.v.ExecutionState = &executionstate + + return s +} + +func (s *_watchStatus) LastChecked(datetime types.DateTimeVariant) *_watchStatus { + + s.v.LastChecked = *datetime.DateTimeCaster() + + return s +} + +func (s *_watchStatus) LastMetCondition(datetime types.DateTimeVariant) *_watchStatus { + + s.v.LastMetCondition = *datetime.DateTimeCaster() + + return s +} + +func (s *_watchStatus) State(state types.ActivationStateVariant) *_watchStatus { + + s.v.State = *state.ActivationStateCaster() + + return s +} + +func (s *_watchStatus) Version(versionnumber int64) *_watchStatus { + + s.v.Version = versionnumber + + return s +} + +func (s *_watchStatus) WatchStatusCaster() *types.WatchStatus { + return s.v +} diff --git a/typedapi/esdsl/watsonxservicesettings.go b/typedapi/esdsl/watsonxservicesettings.go new file mode 100644 index 0000000000..ddbfc80acf --- /dev/null +++ b/typedapi/esdsl/watsonxservicesettings.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _watsonxServiceSettings struct { + v *types.WatsonxServiceSettings +} + +func NewWatsonxServiceSettings(apikey string, apiversion string, modelid string, projectid string, url string) *_watsonxServiceSettings { + + tmp := &_watsonxServiceSettings{v: types.NewWatsonxServiceSettings()} + + tmp.ApiKey(apikey) + + tmp.ApiVersion(apiversion) + + tmp.ModelId(modelid) + + tmp.ProjectId(projectid) + + tmp.Url(url) + + return tmp + +} + +// A valid API key of your Watsonx account. +// You can find your Watsonx API keys or you can create a new one on the API +// keys page. +// +// IMPORTANT: You need to provide the API key only once, during the inference +// model creation. +// The get inference endpoint API does not retrieve your API key. +// After creating the inference model, you cannot change the associated API key. +// If you want to use a different API key, delete the inference model and +// recreate it with the same name and the updated API key. 
+func (s *_watsonxServiceSettings) ApiKey(apikey string) *_watsonxServiceSettings { + + s.v.ApiKey = apikey + + return s +} + +// A version parameter that takes a version date in the format of `YYYY-MM-DD`. +// For the active version data parameters, refer to the Wastonx documentation. +func (s *_watsonxServiceSettings) ApiVersion(apiversion string) *_watsonxServiceSettings { + + s.v.ApiVersion = apiversion + + return s +} + +// The name of the model to use for the inference task. +// Refer to the IBM Embedding Models section in the Watsonx documentation for +// the list of available text embedding models. +func (s *_watsonxServiceSettings) ModelId(modelid string) *_watsonxServiceSettings { + + s.v.ModelId = modelid + + return s +} + +// The identifier of the IBM Cloud project to use for the inference task. +func (s *_watsonxServiceSettings) ProjectId(projectid string) *_watsonxServiceSettings { + + s.v.ProjectId = projectid + + return s +} + +// This setting helps to minimize the number of rate limit errors returned from +// Watsonx. +// By default, the `watsonxai` service sets the number of requests allowed per +// minute to 120. +func (s *_watsonxServiceSettings) RateLimit(ratelimit types.RateLimitSettingVariant) *_watsonxServiceSettings { + + s.v.RateLimit = ratelimit.RateLimitSettingCaster() + + return s +} + +// The URL of the inference endpoint that you created on Watsonx. +func (s *_watsonxServiceSettings) Url(url string) *_watsonxServiceSettings { + + s.v.Url = url + + return s +} + +func (s *_watsonxServiceSettings) WatsonxServiceSettingsCaster() *types.WatsonxServiceSettings { + return s.v +} diff --git a/typedapi/esdsl/webhookaction.go b/typedapi/esdsl/webhookaction.go new file mode 100644 index 0000000000..1e3b9b51ea --- /dev/null +++ b/typedapi/esdsl/webhookaction.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod" +) + +type _webhookAction struct { + v *types.WebhookAction +} + +func NewWebhookAction() *_webhookAction { + + return &_webhookAction{v: types.NewWebhookAction()} + +} + +func (s *_webhookAction) Auth(auth types.HttpInputAuthenticationVariant) *_webhookAction { + + s.v.Auth = auth.HttpInputAuthenticationCaster() + + return s +} + +func (s *_webhookAction) Body(body string) *_webhookAction { + + s.v.Body = &body + + return s +} + +func (s *_webhookAction) ConnectionTimeout(duration types.DurationVariant) *_webhookAction { + + s.v.ConnectionTimeout = *duration.DurationCaster() + + return s +} + +func (s *_webhookAction) Headers(headers map[string]string) *_webhookAction { + + s.v.Headers = headers + return s +} + +func (s *_webhookAction) AddHeader(key string, value string) *_webhookAction { + + var tmp map[string]string + if s.v.Headers == nil { + 
s.v.Headers = make(map[string]string) + } else { + tmp = s.v.Headers + } + + tmp[key] = value + + s.v.Headers = tmp + return s +} + +func (s *_webhookAction) Host(host string) *_webhookAction { + + s.v.Host = &host + + return s +} + +func (s *_webhookAction) Method(method httpinputmethod.HttpInputMethod) *_webhookAction { + + s.v.Method = &method + return s +} + +func (s *_webhookAction) Params(params map[string]string) *_webhookAction { + + s.v.Params = params + return s +} + +func (s *_webhookAction) AddParam(key string, value string) *_webhookAction { + + var tmp map[string]string + if s.v.Params == nil { + s.v.Params = make(map[string]string) + } else { + tmp = s.v.Params + } + + tmp[key] = value + + s.v.Params = tmp + return s +} + +func (s *_webhookAction) Path(path string) *_webhookAction { + + s.v.Path = &path + + return s +} + +func (s *_webhookAction) Port(port uint) *_webhookAction { + + s.v.Port = &port + + return s +} + +func (s *_webhookAction) Proxy(proxy types.HttpInputProxyVariant) *_webhookAction { + + s.v.Proxy = proxy.HttpInputProxyCaster() + + return s +} + +func (s *_webhookAction) ReadTimeout(duration types.DurationVariant) *_webhookAction { + + s.v.ReadTimeout = *duration.DurationCaster() + + return s +} + +func (s *_webhookAction) Scheme(scheme connectionscheme.ConnectionScheme) *_webhookAction { + + s.v.Scheme = &scheme + return s +} + +func (s *_webhookAction) Url(url string) *_webhookAction { + + s.v.Url = &url + + return s +} + +func (s *_webhookAction) WebhookActionCaster() *types.WebhookAction { + return s.v +} diff --git a/typedapi/esdsl/weightedaverageaggregation.go b/typedapi/esdsl/weightedaverageaggregation.go new file mode 100644 index 0000000000..472be1f0b9 --- /dev/null +++ b/typedapi/esdsl/weightedaverageaggregation.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +type _weightedAverageAggregation struct { + v *types.WeightedAverageAggregation +} + +// A single-value metrics aggregation that computes the weighted average of +// numeric values that are extracted from the aggregated documents. +func NewWeightedAverageAggregation() *_weightedAverageAggregation { + + return &_weightedAverageAggregation{v: types.NewWeightedAverageAggregation()} + +} + +// A numeric response formatter. +func (s *_weightedAverageAggregation) Format(format string) *_weightedAverageAggregation { + + s.v.Format = &format + + return s +} + +// Configuration for the field that provides the values. 
+func (s *_weightedAverageAggregation) Value(value types.WeightedAverageValueVariant) *_weightedAverageAggregation { + + s.v.Value = value.WeightedAverageValueCaster() + + return s +} + +func (s *_weightedAverageAggregation) ValueType(valuetype valuetype.ValueType) *_weightedAverageAggregation { + + s.v.ValueType = &valuetype + return s +} + +// Configuration for the field or script that provides the weights. +func (s *_weightedAverageAggregation) Weight(weight types.WeightedAverageValueVariant) *_weightedAverageAggregation { + + s.v.Weight = weight.WeightedAverageValueCaster() + + return s +} + +func (s *_weightedAverageAggregation) AggregationsCaster() *types.Aggregations { + container := types.NewAggregations() + + container.WeightedAvg = s.v + + return container +} + +func (s *_weightedAverageAggregation) WeightedAverageAggregationCaster() *types.WeightedAverageAggregation { + return s.v +} diff --git a/typedapi/esdsl/weightedaveragevalue.go b/typedapi/esdsl/weightedaveragevalue.go new file mode 100644 index 0000000000..e43e485d2e --- /dev/null +++ b/typedapi/esdsl/weightedaveragevalue.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _weightedAverageValue struct { + v *types.WeightedAverageValue +} + +func NewWeightedAverageValue() *_weightedAverageValue { + + return &_weightedAverageValue{v: types.NewWeightedAverageValue()} + +} + +// The field from which to extract the values or weights. +func (s *_weightedAverageValue) Field(field string) *_weightedAverageValue { + + s.v.Field = &field + + return s +} + +// A value or weight to use if the field is missing. +func (s *_weightedAverageValue) Missing(missing types.Float64) *_weightedAverageValue { + + s.v.Missing = &missing + + return s +} + +func (s *_weightedAverageValue) Script(script types.ScriptVariant) *_weightedAverageValue { + + s.v.Script = script.ScriptCaster() + + return s +} + +func (s *_weightedAverageValue) WeightedAverageValueCaster() *types.WeightedAverageValue { + return s.v +} diff --git a/typedapi/esdsl/weightedtokensquery.go b/typedapi/esdsl/weightedtokensquery.go new file mode 100644 index 0000000000..0b835a2713 --- /dev/null +++ b/typedapi/esdsl/weightedtokensquery.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _weightedTokensQuery struct { + k string + v *types.WeightedTokensQuery +} + +// Supports returning text_expansion query results by sending in precomputed +// tokens with the query. +func NewWeightedTokensQuery(key string) *_weightedTokensQuery { + return &_weightedTokensQuery{ + k: key, + v: types.NewWeightedTokensQuery(), + } +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. 
+func (s *_weightedTokensQuery) Boost(boost float32) *_weightedTokensQuery { + + s.v.Boost = &boost + + return s +} + +// Token pruning configurations +func (s *_weightedTokensQuery) PruningConfig(pruningconfig types.TokenPruningConfigVariant) *_weightedTokensQuery { + + s.v.PruningConfig = pruningconfig.TokenPruningConfigCaster() + + return s +} + +func (s *_weightedTokensQuery) QueryName_(queryname_ string) *_weightedTokensQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// The tokens representing this query +func (s *_weightedTokensQuery) Tokens(tokens map[string]float32) *_weightedTokensQuery { + + s.v.Tokens = tokens + return s +} + +func (s *_weightedTokensQuery) AddToken(key string, value float32) *_weightedTokensQuery { + + var tmp map[string]float32 + if s.v.Tokens == nil { + s.v.Tokens = make(map[string]float32) + } else { + tmp = s.v.Tokens + } + + tmp[key] = value + + s.v.Tokens = tmp + return s +} + +func (s *_weightedTokensQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.WeightedTokens = map[string]types.WeightedTokensQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleWeightedTokensQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. +func NewSingleWeightedTokensQuery() *_weightedTokensQuery { + return &_weightedTokensQuery{ + k: "", + v: types.NewWeightedTokensQuery(), + } +} + +func (s *_weightedTokensQuery) WeightedTokensQueryCaster() *types.WeightedTokensQuery { + return s.v.WeightedTokensQueryCaster() +} diff --git a/typedapi/esdsl/weights.go b/typedapi/esdsl/weights.go new file mode 100644 index 0000000000..7c32c92b1a --- /dev/null +++ b/typedapi/esdsl/weights.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _weights struct { + v *types.Weights +} + +func NewWeights(weights types.Float64) *_weights { + + tmp := &_weights{v: types.NewWeights()} + + tmp.Weights(weights) + + return tmp + +} + +func (s *_weights) Weights(weights types.Float64) *_weights { + + s.v.Weights = weights + + return s +} + +func (s *_weights) WeightsCaster() *types.Weights { + return s.v +} diff --git a/typedapi/esdsl/whitespaceanalyzer.go b/typedapi/esdsl/whitespaceanalyzer.go new file mode 100644 index 0000000000..b22793e06d --- /dev/null +++ b/typedapi/esdsl/whitespaceanalyzer.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _whitespaceAnalyzer struct { + v *types.WhitespaceAnalyzer +} + +func NewWhitespaceAnalyzer() *_whitespaceAnalyzer { + + return &_whitespaceAnalyzer{v: types.NewWhitespaceAnalyzer()} + +} + +func (s *_whitespaceAnalyzer) Version(versionstring string) *_whitespaceAnalyzer { + + s.v.Version = &versionstring + + return s +} + +func (s *_whitespaceAnalyzer) WhitespaceAnalyzerCaster() *types.WhitespaceAnalyzer { + return s.v +} diff --git a/typedapi/esdsl/whitespacetokenizer.go b/typedapi/esdsl/whitespacetokenizer.go new file mode 100644 index 0000000000..841ace4cb9 --- /dev/null +++ b/typedapi/esdsl/whitespacetokenizer.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _whitespaceTokenizer struct { + v *types.WhitespaceTokenizer +} + +func NewWhitespaceTokenizer() *_whitespaceTokenizer { + + return &_whitespaceTokenizer{v: types.NewWhitespaceTokenizer()} + +} + +func (s *_whitespaceTokenizer) MaxTokenLength(maxtokenlength int) *_whitespaceTokenizer { + + s.v.MaxTokenLength = &maxtokenlength + + return s +} + +func (s *_whitespaceTokenizer) Version(versionstring string) *_whitespaceTokenizer { + + s.v.Version = &versionstring + + return s +} + +func (s *_whitespaceTokenizer) WhitespaceTokenizerCaster() *types.WhitespaceTokenizer { + return s.v +} diff --git a/typedapi/esdsl/wildcardproperty.go b/typedapi/esdsl/wildcardproperty.go new file mode 100644 index 0000000000..c4738aba13 --- /dev/null +++ b/typedapi/esdsl/wildcardproperty.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +type _wildcardProperty struct { + v *types.WildcardProperty +} + +func NewWildcardProperty() *_wildcardProperty { + + return &_wildcardProperty{v: types.NewWildcardProperty()} + +} + +func (s *_wildcardProperty) CopyTo(fields ...string) *_wildcardProperty { + + s.v.CopyTo = fields + + return s +} + +func (s *_wildcardProperty) DocValues(docvalues bool) *_wildcardProperty { + + s.v.DocValues = &docvalues + + return s +} + +func (s *_wildcardProperty) Dynamic(dynamic dynamicmapping.DynamicMapping) *_wildcardProperty { + + s.v.Dynamic = &dynamic + return s +} + +func (s *_wildcardProperty) Fields(fields map[string]types.Property) *_wildcardProperty { + + s.v.Fields = fields + return s +} + +func (s *_wildcardProperty) AddField(key string, value types.PropertyVariant) *_wildcardProperty { + + var tmp map[string]types.Property + if s.v.Fields == nil { + s.v.Fields = make(map[string]types.Property) + } else { + tmp = s.v.Fields + } + + tmp[key] = *value.PropertyCaster() + + s.v.Fields = tmp + return s +} + +func (s *_wildcardProperty) IgnoreAbove(ignoreabove int) *_wildcardProperty { + + s.v.IgnoreAbove = &ignoreabove + + 
return s +} + +// Metadata about the field. +func (s *_wildcardProperty) Meta(meta map[string]string) *_wildcardProperty { + + s.v.Meta = meta + return s +} + +func (s *_wildcardProperty) AddMeta(key string, value string) *_wildcardProperty { + + var tmp map[string]string + if s.v.Meta == nil { + s.v.Meta = make(map[string]string) + } else { + tmp = s.v.Meta + } + + tmp[key] = value + + s.v.Meta = tmp + return s +} + +func (s *_wildcardProperty) NullValue(nullvalue string) *_wildcardProperty { + + s.v.NullValue = &nullvalue + + return s +} + +func (s *_wildcardProperty) Properties(properties map[string]types.Property) *_wildcardProperty { + + s.v.Properties = properties + return s +} + +func (s *_wildcardProperty) AddProperty(key string, value types.PropertyVariant) *_wildcardProperty { + + var tmp map[string]types.Property + if s.v.Properties == nil { + s.v.Properties = make(map[string]types.Property) + } else { + tmp = s.v.Properties + } + + tmp[key] = *value.PropertyCaster() + + s.v.Properties = tmp + return s +} + +func (s *_wildcardProperty) Store(store bool) *_wildcardProperty { + + s.v.Store = &store + + return s +} + +func (s *_wildcardProperty) SyntheticSourceKeep(syntheticsourcekeep syntheticsourcekeepenum.SyntheticSourceKeepEnum) *_wildcardProperty { + + s.v.SyntheticSourceKeep = &syntheticsourcekeep + return s +} + +func (s *_wildcardProperty) DynamicTemplateCaster() *types.DynamicTemplate { + container := types.NewDynamicTemplate() + + container.Mapping = s.v + + return container +} + +func (s *_wildcardProperty) WildcardPropertyCaster() *types.WildcardProperty { + return s.v +} diff --git a/typedapi/esdsl/wildcardquery.go b/typedapi/esdsl/wildcardquery.go new file mode 100644 index 0000000000..175dbbca40 --- /dev/null +++ b/typedapi/esdsl/wildcardquery.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _wildcardQuery struct { + k string + v *types.WildcardQuery +} + +// Returns users that contain terms matching a wildcard pattern. +func NewWildcardQuery(field string, value string) *_wildcardQuery { + tmp := &_wildcardQuery{ + k: field, + v: types.NewWildcardQuery(), + } + + tmp.Value(value) + return tmp +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_wildcardQuery) Boost(boost float32) *_wildcardQuery { + + s.v.Boost = &boost + + return s +} + +// Allows case insensitive matching of the pattern with the indexed field values +// when set to true. Default is false which means the case sensitivity of +// matching depends on the underlying field’s mapping. 
+func (s *_wildcardQuery) CaseInsensitive(caseinsensitive bool) *_wildcardQuery { + + s.v.CaseInsensitive = &caseinsensitive + + return s +} + +func (s *_wildcardQuery) QueryName_(queryname_ string) *_wildcardQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +// Method used to rewrite the query. +func (s *_wildcardQuery) Rewrite(multitermqueryrewrite string) *_wildcardQuery { + + s.v.Rewrite = &multitermqueryrewrite + + return s +} + +// Wildcard pattern for terms you wish to find in the provided field. Required, +// when wildcard is not set. +func (s *_wildcardQuery) Value(value string) *_wildcardQuery { + + s.v.Value = &value + + return s +} + +// Wildcard pattern for terms you wish to find in the provided field. Required, +// when value is not set. +func (s *_wildcardQuery) Wildcard(wildcard string) *_wildcardQuery { + + s.v.Wildcard = &wildcard + + return s +} + +func (s *_wildcardQuery) QueryCaster() *types.Query { + container := types.NewQuery() + container.Wildcard = map[string]types.WildcardQuery{ + s.k: *s.v, + } + return container +} + +func (s *_wildcardQuery) ApiKeyQueryContainerCaster() *types.ApiKeyQueryContainer { + container := types.NewApiKeyQueryContainer() + container.Wildcard = map[string]types.WildcardQuery{ + s.k: *s.v, + } + return container +} + +func (s *_wildcardQuery) RoleQueryContainerCaster() *types.RoleQueryContainer { + container := types.NewRoleQueryContainer() + container.Wildcard = map[string]types.WildcardQuery{ + s.k: *s.v, + } + return container +} + +func (s *_wildcardQuery) UserQueryContainerCaster() *types.UserQueryContainer { + container := types.NewUserQueryContainer() + container.Wildcard = map[string]types.WildcardQuery{ + s.k: *s.v, + } + return container +} + +// NewSingleWildcardQuery should be used when you want to +// create a single key dictionary without specifying the key in the +// constructor. Usually key is already defined within the parent container. 
+func NewSingleWildcardQuery() *_wildcardQuery { + return &_wildcardQuery{ + k: "", + v: types.NewWildcardQuery(), + } +} + +func (s *_wildcardQuery) WildcardQueryCaster() *types.WildcardQuery { + return s.v.WildcardQueryCaster() +} diff --git a/typedapi/esdsl/wktgeobounds.go b/typedapi/esdsl/wktgeobounds.go new file mode 100644 index 0000000000..ee4c65e7f4 --- /dev/null +++ b/typedapi/esdsl/wktgeobounds.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _wktGeoBounds struct { + v *types.WktGeoBounds +} + +func NewWktGeoBounds(wkt string) *_wktGeoBounds { + + tmp := &_wktGeoBounds{v: types.NewWktGeoBounds()} + + tmp.Wkt(wkt) + + return tmp + +} + +func (s *_wktGeoBounds) Wkt(wkt string) *_wktGeoBounds { + + s.v.Wkt = wkt + + return s +} + +func (s *_wktGeoBounds) WktGeoBoundsCaster() *types.WktGeoBounds { + return s.v +} diff --git a/typedapi/esdsl/worddelimitergraphtokenfilter.go b/typedapi/esdsl/worddelimitergraphtokenfilter.go new file mode 100644 index 0000000000..c11b9d5143 --- /dev/null +++ b/typedapi/esdsl/worddelimitergraphtokenfilter.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _wordDelimiterGraphTokenFilter struct { + v *types.WordDelimiterGraphTokenFilter +} + +func NewWordDelimiterGraphTokenFilter() *_wordDelimiterGraphTokenFilter { + + return &_wordDelimiterGraphTokenFilter{v: types.NewWordDelimiterGraphTokenFilter()} + +} + +func (s *_wordDelimiterGraphTokenFilter) AdjustOffsets(adjustoffsets bool) *_wordDelimiterGraphTokenFilter { + + s.v.AdjustOffsets = &adjustoffsets + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) CatenateAll(catenateall bool) *_wordDelimiterGraphTokenFilter { + + s.v.CatenateAll = &catenateall + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) CatenateNumbers(catenatenumbers bool) *_wordDelimiterGraphTokenFilter { + + s.v.CatenateNumbers = &catenatenumbers + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) CatenateWords(catenatewords bool) *_wordDelimiterGraphTokenFilter { + + s.v.CatenateWords = &catenatewords + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) GenerateNumberParts(generatenumberparts bool) *_wordDelimiterGraphTokenFilter { + + s.v.GenerateNumberParts = &generatenumberparts + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) GenerateWordParts(generatewordparts bool) *_wordDelimiterGraphTokenFilter { + + s.v.GenerateWordParts = &generatewordparts + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) IgnoreKeywords(ignorekeywords bool) *_wordDelimiterGraphTokenFilter { + + s.v.IgnoreKeywords = &ignorekeywords + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_wordDelimiterGraphTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) ProtectedWords(protectedwords ...string) 
*_wordDelimiterGraphTokenFilter { + + for _, v := range protectedwords { + + s.v.ProtectedWords = append(s.v.ProtectedWords, v) + + } + return s +} + +func (s *_wordDelimiterGraphTokenFilter) ProtectedWordsPath(protectedwordspath string) *_wordDelimiterGraphTokenFilter { + + s.v.ProtectedWordsPath = &protectedwordspath + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) SplitOnCaseChange(splitoncasechange bool) *_wordDelimiterGraphTokenFilter { + + s.v.SplitOnCaseChange = &splitoncasechange + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) SplitOnNumerics(splitonnumerics bool) *_wordDelimiterGraphTokenFilter { + + s.v.SplitOnNumerics = &splitonnumerics + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) StemEnglishPossessive(stemenglishpossessive bool) *_wordDelimiterGraphTokenFilter { + + s.v.StemEnglishPossessive = &stemenglishpossessive + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) TypeTable(typetables ...string) *_wordDelimiterGraphTokenFilter { + + for _, v := range typetables { + + s.v.TypeTable = append(s.v.TypeTable, v) + + } + return s +} + +func (s *_wordDelimiterGraphTokenFilter) TypeTablePath(typetablepath string) *_wordDelimiterGraphTokenFilter { + + s.v.TypeTablePath = &typetablepath + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) Version(versionstring string) *_wordDelimiterGraphTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_wordDelimiterGraphTokenFilter) WordDelimiterGraphTokenFilterCaster() *types.WordDelimiterGraphTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/worddelimitertokenfilter.go b/typedapi/esdsl/worddelimitertokenfilter.go new file mode 100644 index 0000000000..e6a8254781 --- /dev/null +++ b/typedapi/esdsl/worddelimitertokenfilter.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _wordDelimiterTokenFilter struct { + v *types.WordDelimiterTokenFilter +} + +func NewWordDelimiterTokenFilter() *_wordDelimiterTokenFilter { + + return &_wordDelimiterTokenFilter{v: types.NewWordDelimiterTokenFilter()} + +} + +func (s *_wordDelimiterTokenFilter) CatenateAll(catenateall bool) *_wordDelimiterTokenFilter { + + s.v.CatenateAll = &catenateall + + return s +} + +func (s *_wordDelimiterTokenFilter) CatenateNumbers(catenatenumbers bool) *_wordDelimiterTokenFilter { + + s.v.CatenateNumbers = &catenatenumbers + + return s +} + +func (s *_wordDelimiterTokenFilter) CatenateWords(catenatewords bool) *_wordDelimiterTokenFilter { + + s.v.CatenateWords = &catenatewords + + return s +} + +func (s *_wordDelimiterTokenFilter) GenerateNumberParts(generatenumberparts bool) *_wordDelimiterTokenFilter { + + s.v.GenerateNumberParts = &generatenumberparts + + return s +} + +func (s *_wordDelimiterTokenFilter) GenerateWordParts(generatewordparts bool) *_wordDelimiterTokenFilter { + + s.v.GenerateWordParts = 
&generatewordparts + + return s +} + +func (s *_wordDelimiterTokenFilter) PreserveOriginal(stringifiedboolean types.StringifiedbooleanVariant) *_wordDelimiterTokenFilter { + + s.v.PreserveOriginal = *stringifiedboolean.StringifiedbooleanCaster() + + return s +} + +func (s *_wordDelimiterTokenFilter) ProtectedWords(protectedwords ...string) *_wordDelimiterTokenFilter { + + for _, v := range protectedwords { + + s.v.ProtectedWords = append(s.v.ProtectedWords, v) + + } + return s +} + +func (s *_wordDelimiterTokenFilter) ProtectedWordsPath(protectedwordspath string) *_wordDelimiterTokenFilter { + + s.v.ProtectedWordsPath = &protectedwordspath + + return s +} + +func (s *_wordDelimiterTokenFilter) SplitOnCaseChange(splitoncasechange bool) *_wordDelimiterTokenFilter { + + s.v.SplitOnCaseChange = &splitoncasechange + + return s +} + +func (s *_wordDelimiterTokenFilter) SplitOnNumerics(splitonnumerics bool) *_wordDelimiterTokenFilter { + + s.v.SplitOnNumerics = &splitonnumerics + + return s +} + +func (s *_wordDelimiterTokenFilter) StemEnglishPossessive(stemenglishpossessive bool) *_wordDelimiterTokenFilter { + + s.v.StemEnglishPossessive = &stemenglishpossessive + + return s +} + +func (s *_wordDelimiterTokenFilter) TypeTable(typetables ...string) *_wordDelimiterTokenFilter { + + for _, v := range typetables { + + s.v.TypeTable = append(s.v.TypeTable, v) + + } + return s +} + +func (s *_wordDelimiterTokenFilter) TypeTablePath(typetablepath string) *_wordDelimiterTokenFilter { + + s.v.TypeTablePath = &typetablepath + + return s +} + +func (s *_wordDelimiterTokenFilter) Version(versionstring string) *_wordDelimiterTokenFilter { + + s.v.Version = &versionstring + + return s +} + +func (s *_wordDelimiterTokenFilter) WordDelimiterTokenFilterCaster() *types.WordDelimiterTokenFilter { + return s.v +} diff --git a/typedapi/esdsl/wrapperquery.go b/typedapi/esdsl/wrapperquery.go new file mode 100644 index 0000000000..e116fb17ba --- /dev/null +++ b/typedapi/esdsl/wrapperquery.go @@ 
-0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _wrapperQuery struct { + v *types.WrapperQuery +} + +// A query that accepts any other query as base64 encoded string. +func NewWrapperQuery(query string) *_wrapperQuery { + + tmp := &_wrapperQuery{v: types.NewWrapperQuery()} + + tmp.Query(query) + + return tmp + +} + +// Floating point number used to decrease or increase the relevance scores of +// the query. +// Boost values are relative to the default value of 1.0. +// A boost value between 0 and 1.0 decreases the relevance score. +// A value greater than 1.0 increases the relevance score. +func (s *_wrapperQuery) Boost(boost float32) *_wrapperQuery { + + s.v.Boost = &boost + + return s +} + +// A base64 encoded query. 
+// The binary data format can be any of JSON, YAML, CBOR or SMILE encodings +func (s *_wrapperQuery) Query(query string) *_wrapperQuery { + + s.v.Query = query + + return s +} + +func (s *_wrapperQuery) QueryName_(queryname_ string) *_wrapperQuery { + + s.v.QueryName_ = &queryname_ + + return s +} + +func (s *_wrapperQuery) QueryCaster() *types.Query { + container := types.NewQuery() + + container.Wrapper = s.v + + return container +} + +func (s *_wrapperQuery) WrapperQueryCaster() *types.WrapperQuery { + return s.v +} diff --git a/typedapi/esdsl/zeroshotclassificationinferenceoptions.go b/typedapi/esdsl/zeroshotclassificationinferenceoptions.go new file mode 100644 index 0000000000..93be356710 --- /dev/null +++ b/typedapi/esdsl/zeroshotclassificationinferenceoptions.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _zeroShotClassificationInferenceOptions struct { + v *types.ZeroShotClassificationInferenceOptions +} + +// Zeroshot classification configuration for inference. +func NewZeroShotClassificationInferenceOptions() *_zeroShotClassificationInferenceOptions { + + return &_zeroShotClassificationInferenceOptions{v: types.NewZeroShotClassificationInferenceOptions()} + +} + +// The zero shot classification labels indicating entailment, neutral, and +// contradiction +// Must contain exactly and only entailment, neutral, and contradiction +func (s *_zeroShotClassificationInferenceOptions) ClassificationLabels(classificationlabels ...string) *_zeroShotClassificationInferenceOptions { + + for _, v := range classificationlabels { + + s.v.ClassificationLabels = append(s.v.ClassificationLabels, v) + + } + return s +} + +// Hypothesis template used when tokenizing labels for prediction +func (s *_zeroShotClassificationInferenceOptions) HypothesisTemplate(hypothesistemplate string) *_zeroShotClassificationInferenceOptions { + + s.v.HypothesisTemplate = &hypothesistemplate + + return s +} + +// The labels to predict. +func (s *_zeroShotClassificationInferenceOptions) Labels(labels ...string) *_zeroShotClassificationInferenceOptions { + + for _, v := range labels { + + s.v.Labels = append(s.v.Labels, v) + + } + return s +} + +// Indicates if more than one true label exists. +func (s *_zeroShotClassificationInferenceOptions) MultiLabel(multilabel bool) *_zeroShotClassificationInferenceOptions { + + s.v.MultiLabel = &multilabel + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_zeroShotClassificationInferenceOptions) ResultsField(resultsfield string) *_zeroShotClassificationInferenceOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_zeroShotClassificationInferenceOptions) Tokenization(tokenization types.TokenizationConfigContainerVariant) *_zeroShotClassificationInferenceOptions { + + s.v.Tokenization = tokenization.TokenizationConfigContainerCaster() + + return s +} + +func (s *_zeroShotClassificationInferenceOptions) InferenceConfigCreateContainerCaster() *types.InferenceConfigCreateContainer { + container := types.NewInferenceConfigCreateContainer() + + container.ZeroShotClassification = s.v + + return container +} + +func (s *_zeroShotClassificationInferenceOptions) ZeroShotClassificationInferenceOptionsCaster() *types.ZeroShotClassificationInferenceOptions { + return s.v +} diff --git a/typedapi/esdsl/zeroshotclassificationinferenceupdateoptions.go b/typedapi/esdsl/zeroshotclassificationinferenceupdateoptions.go new file mode 100644 index 0000000000..0d656bf5d2 --- /dev/null +++ b/typedapi/esdsl/zeroshotclassificationinferenceupdateoptions.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package esdsl + +import "github.com/elastic/go-elasticsearch/v8/typedapi/types" + +type _zeroShotClassificationInferenceUpdateOptions struct { + v *types.ZeroShotClassificationInferenceUpdateOptions +} + +// Zeroshot classification configuration for inference. +func NewZeroShotClassificationInferenceUpdateOptions() *_zeroShotClassificationInferenceUpdateOptions { + + return &_zeroShotClassificationInferenceUpdateOptions{v: types.NewZeroShotClassificationInferenceUpdateOptions()} + +} + +// The labels to predict. +func (s *_zeroShotClassificationInferenceUpdateOptions) Labels(labels ...string) *_zeroShotClassificationInferenceUpdateOptions { + + for _, v := range labels { + + s.v.Labels = append(s.v.Labels, v) + + } + return s +} + +// Update the configured multi label option. Indicates if more than one true +// label exists. Defaults to the configured value. +func (s *_zeroShotClassificationInferenceUpdateOptions) MultiLabel(multilabel bool) *_zeroShotClassificationInferenceUpdateOptions { + + s.v.MultiLabel = &multilabel + + return s +} + +// The field that is added to incoming documents to contain the inference +// prediction. Defaults to predicted_value. 
+func (s *_zeroShotClassificationInferenceUpdateOptions) ResultsField(resultsfield string) *_zeroShotClassificationInferenceUpdateOptions { + + s.v.ResultsField = &resultsfield + + return s +} + +// The tokenization options to update when inferring +func (s *_zeroShotClassificationInferenceUpdateOptions) Tokenization(tokenization types.NlpTokenizationUpdateOptionsVariant) *_zeroShotClassificationInferenceUpdateOptions { + + s.v.Tokenization = tokenization.NlpTokenizationUpdateOptionsCaster() + + return s +} + +func (s *_zeroShotClassificationInferenceUpdateOptions) InferenceConfigUpdateContainerCaster() *types.InferenceConfigUpdateContainer { + container := types.NewInferenceConfigUpdateContainer() + + container.ZeroShotClassification = s.v + + return container +} + +func (s *_zeroShotClassificationInferenceUpdateOptions) ZeroShotClassificationInferenceUpdateOptionsCaster() *types.ZeroShotClassificationInferenceUpdateOptions { + return s.v +} diff --git a/typedapi/esql/asyncquery/async_query.go b/typedapi/esql/asyncquery/async_query.go index a95cc8ba30..159ced132c 100644 --- a/typedapi/esql/asyncquery/async_query.go +++ b/typedapi/esql/asyncquery/async_query.go @@ -16,21 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Executes an ESQL request asynchronously +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. 
package asyncquery import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/esqlformat" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +55,10 @@ type AsyncQuery struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,14 +79,21 @@ func NewAsyncQueryFunc(tp elastictransport.Interface) NewAsyncQuery { } } -// Executes an ESQL request asynchronously +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query func New(tp elastictransport.Interface) *AsyncQuery { r := &AsyncQuery{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +105,21 @@ func New(tp elastictransport.Interface) *AsyncQuery { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *AsyncQuery) Raw(raw io.Reader) *AsyncQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *AsyncQuery) Request(req *Request) *AsyncQuery { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *AsyncQuery) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +129,31 @@ func (r *AsyncQuery) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for AsyncQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +241,7 @@ func (r AsyncQuery) Perform(providedCtx context.Context) (*http.Response, error) } // Do runs the request through the transport, handle the response and returns a asyncquery.Response -func (r AsyncQuery) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r AsyncQuery) IsSuccess(providedCtx context.Context) (bool, error) { +func (r AsyncQuery) Do(providedCtx context.Context) (Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +252,46 @@ func (r AsyncQuery) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the AsyncQuery query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the AsyncQuery headers map. @@ -229,3 +300,232 @@ func (r *AsyncQuery) Header(key, value string) *AsyncQuery { return r } + +// Delimiter The character to use between values within a CSV row. 
+// It is valid only for the CSV format. +// API name: delimiter +func (r *AsyncQuery) Delimiter(delimiter string) *AsyncQuery { + r.values.Set("delimiter", delimiter) + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQuery) DropNullColumns(dropnullcolumns bool) *AsyncQuery { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// Format A short version of the Accept header, for example `json` or `yaml`. +// API name: format +func (r *AsyncQuery) Format(format esqlformat.EsqlFormat) *AsyncQuery { + r.values.Set("format", format.String()) + + return r +} + +// KeepAlive The period for which the query and its results are stored in the cluster. +// The default period is five days. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// If the `keep_on_completion` parameter is false, Elasticsearch only stores +// async queries that do not complete within the period set by the +// `wait_for_completion_timeout` parameter, regardless of this value. +// API name: keep_alive +func (r *AsyncQuery) KeepAlive(duration string) *AsyncQuery { + r.values.Set("keep_alive", duration) + + return r +} + +// KeepOnCompletion Indicates whether the query and its results are stored in the cluster. +// If false, the query and its results are stored in the cluster only if the +// request does not complete during the period set by the +// `wait_for_completion_timeout` parameter. 
+// API name: keep_on_completion +func (r *AsyncQuery) KeepOnCompletion(keeponcompletion bool) *AsyncQuery { + r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) + + return r +} + +// WaitForCompletionTimeout The period to wait for the request to finish. +// By default, the request waits for 1 second for the query results. +// If the query completes during this period, results are returned +// Otherwise, a query ID is returned that can later be used to retrieve the +// results. +// API name: wait_for_completion_timeout +func (r *AsyncQuery) WaitForCompletionTimeout(duration string) *AsyncQuery { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQuery) ErrorTrace(errortrace bool) *AsyncQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQuery) FilterPath(filterpaths ...string) *AsyncQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQuery) Human(human bool) *AsyncQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *AsyncQuery) Pretty(pretty bool) *AsyncQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// By default, ES|QL returns results as rows. For example, FROM returns each +// individual document as one row. For the JSON, YAML, CBOR and smile formats, +// ES|QL can return the results in a columnar fashion where one row represents +// all the values of a certain column in the results. +// API name: columnar +func (r *AsyncQuery) Columnar(columnar bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Columnar = &columnar + + return r +} + +// Specify a Query DSL query in the filter parameter to filter the set of +// documents that an ES|QL query runs on. +// API name: filter +func (r *AsyncQuery) Filter(filter types.QueryVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. +// API name: include_ccs_metadata +func (r *AsyncQuery) IncludeCcsMetadata(includeccsmetadata bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeCcsMetadata = &includeccsmetadata + + return r +} + +// API name: locale +func (r *AsyncQuery) Locale(locale string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Locale = &locale + + return r +} + +// To avoid any attempts of hacking or code injection, extract the values in a +// separate list of parameters. Use question mark placeholders (?) 
in the query +// string for each of the parameters. +// API name: params +func (r *AsyncQuery) Params(params ...types.FieldValueVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + + } + return r +} + +// If provided and `true` the response will include an extra `profile` object +// with information on how the query was executed. This information is for human +// debugging +// and its format can change at any time but it can give some insight into the +// performance +// of each part of the query. +// API name: profile +func (r *AsyncQuery) Profile(profile bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// The ES|QL query API accepts an ES|QL query string in the query parameter, +// runs it, and returns the results. +// API name: query +func (r *AsyncQuery) Query(query string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Tables to use with the LOOKUP operation. The top level key is the table +// name and the next level key is the column name. +// API name: tables +func (r *AsyncQuery) Tables(tables map[string]map[string]types.TableValuesContainer) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Tables = tables + return r +} diff --git a/typedapi/esql/asyncquery/request.go b/typedapi/esql/asyncquery/request.go new file mode 100644 index 0000000000..4c76f4a312 --- /dev/null +++ b/typedapi/esql/asyncquery/request.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package asyncquery + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package asyncquery +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/async_query/AsyncQueryRequest.ts#L28-L125 +type Request struct { + + // Columnar By default, ES|QL returns results as rows. For example, FROM returns each + // individual document as one row. For the JSON, YAML, CBOR and smile formats, + // ES|QL can return the results in a columnar fashion where one row represents + // all the values of a certain column in the results. + Columnar *bool `json:"columnar,omitempty"` + // Filter Specify a Query DSL query in the filter parameter to filter the set of + // documents that an ES|QL query runs on. 
+ Filter *types.Query `json:"filter,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + Locale *string `json:"locale,omitempty"` + // Params To avoid any attempts of hacking or code injection, extract the values in a + // separate list of parameters. Use question mark placeholders (?) in the query + // string for each of the parameters. + Params []types.FieldValue `json:"params,omitempty"` + // Profile If provided and `true` the response will include an extra `profile` object + // with information on how the query was executed. This information is for human + // debugging + // and its format can change at any time but it can give some insight into the + // performance + // of each part of the query. + Profile *bool `json:"profile,omitempty"` + // Query The ES|QL query API accepts an ES|QL query string in the query parameter, + // runs it, and returns the results. + Query string `json:"query"` + // Tables Tables to use with the LOOKUP operation. The top level key is the table + // name and the next level key is the column name. 
+ Tables map[string]map[string]types.TableValuesContainer `json:"tables,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Tables: make(map[string]map[string]types.TableValuesContainer, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Asyncquery request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/esql/asyncquery/response.go b/typedapi/esql/asyncquery/response.go new file mode 100644 index 0000000000..172dbddeaf --- /dev/null +++ b/typedapi/esql/asyncquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package asyncquery + +// Response holds the response body struct for the package asyncquery +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/async_query/AsyncQueryResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/asyncquerydelete/async_query_delete.go b/typedapi/esql/asyncquerydelete/async_query_delete.go new file mode 100644 index 0000000000..9577793dc8 --- /dev/null +++ b/typedapi/esql/asyncquerydelete/async_query_delete.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. +// Otherwise, the stored results are deleted. 
+// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +package asyncquerydelete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryDelete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryDelete type alias for index. +type NewAsyncQueryDelete func(id string) *AsyncQueryDelete + +// NewAsyncQueryDeleteFunc returns a new instance of AsyncQueryDelete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryDeleteFunc(tp elastictransport.Interface) NewAsyncQueryDelete { + return func(id string) *AsyncQueryDelete { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. +// Otherwise, the stored results are deleted. 
+// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete +func New(tp elastictransport.Interface) *AsyncQueryDelete { + r := &AsyncQueryDelete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryDelete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the 
provided transport and returns an http.Response. +func (r AsyncQueryDelete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryDelete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerydelete.Response +func (r AsyncQueryDelete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AsyncQueryDelete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryDelete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryDelete headers map. 
+func (r *AsyncQueryDelete) Header(key, value string) *AsyncQueryDelete { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryDelete) _id(id string) *AsyncQueryDelete { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryDelete) ErrorTrace(errortrace bool) *AsyncQueryDelete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryDelete) FilterPath(filterpaths ...string) *AsyncQueryDelete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryDelete) Human(human bool) *AsyncQueryDelete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *AsyncQueryDelete) Pretty(pretty bool) *AsyncQueryDelete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncquerydelete/response.go b/typedapi/esql/asyncquerydelete/response.go new file mode 100644 index 0000000000..e0ca9cf4a6 --- /dev/null +++ b/typedapi/esql/asyncquerydelete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package asyncquerydelete + +// Response holds the response body struct for the package asyncquerydelete +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/async_query_delete/AsyncQueryDeleteResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/esql/asyncqueryget/async_query_get.go b/typedapi/esql/asyncqueryget/async_query_get.go new file mode 100644 index 0000000000..1dc6820247 --- /dev/null +++ b/typedapi/esql/asyncqueryget/async_query_get.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get async ES|QL query results. +// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. 
+package asyncqueryget + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryGet struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryGet type alias for index. +type NewAsyncQueryGet func(id string) *AsyncQueryGet + +// NewAsyncQueryGetFunc returns a new instance of AsyncQueryGet with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryGetFunc(tp elastictransport.Interface) NewAsyncQueryGet { + return func(id string) *AsyncQueryGet { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get async ES|QL query results. +// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get +func New(tp elastictransport.Interface) *AsyncQueryGet { + r := &AsyncQueryGet{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryGet) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r AsyncQueryGet) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_get") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryGet query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncqueryget.Response +func (r AsyncQueryGet) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if 
res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AsyncQueryGet) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryGet query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryGet headers map. 
+func (r *AsyncQueryGet) Header(key, value string) *AsyncQueryGet { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryGet) _id(id string) *AsyncQueryGet { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQueryGet) DropNullColumns(dropnullcolumns bool) *AsyncQueryGet { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// KeepAlive The period for which the query and its results are stored in the cluster. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// API name: keep_alive +func (r *AsyncQueryGet) KeepAlive(duration string) *AsyncQueryGet { + r.values.Set("keep_alive", duration) + + return r +} + +// WaitForCompletionTimeout The period to wait for the request to finish. +// By default, the request waits for complete query results. +// If the request completes during the period specified in this parameter, +// complete query results are returned. +// Otherwise, the response returns an `is_running` value of `true` and no +// results. 
+// API name: wait_for_completion_timeout +func (r *AsyncQueryGet) WaitForCompletionTimeout(duration string) *AsyncQueryGet { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryGet) ErrorTrace(errortrace bool) *AsyncQueryGet { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryGet) FilterPath(filterpaths ...string) *AsyncQueryGet { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryGet) Human(human bool) *AsyncQueryGet { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryGet) Pretty(pretty bool) *AsyncQueryGet { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncqueryget/response.go b/typedapi/esql/asyncqueryget/response.go new file mode 100644 index 0000000000..d60c86a037 --- /dev/null +++ b/typedapi/esql/asyncqueryget/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package asyncqueryget + +// Response holds the response body struct for the package asyncqueryget +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/async_query_get/AsyncQueryGetResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/asyncquerystop/async_query_stop.go b/typedapi/esql/asyncquerystop/async_query_stop.go new file mode 100644 index 0000000000..8be03cfe75 --- /dev/null +++ b/typedapi/esql/asyncquerystop/async_query_stop.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. +package asyncquerystop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryStop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryStop type alias for index. +type NewAsyncQueryStop func(id string) *AsyncQueryStop + +// NewAsyncQueryStopFunc returns a new instance of AsyncQueryStop with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewAsyncQueryStopFunc(tp elastictransport.Interface) NewAsyncQueryStop { + return func(id string) *AsyncQueryStop { + n := New(tp) + + n._id(id) + + return n + } +} + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html +func New(tp elastictransport.Interface) *AsyncQueryStop { + r := &AsyncQueryStop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryStop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", 
"application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AsyncQueryStop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryStop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerystop.Response +func (r AsyncQueryStop) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = 
providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r AsyncQueryStop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryStop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryStop headers map. +func (r *AsyncQueryStop) Header(key, value string) *AsyncQueryStop { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryStop) _id(id string) *AsyncQueryStop { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. 
+// API name: drop_null_columns +func (r *AsyncQueryStop) DropNullColumns(dropnullcolumns bool) *AsyncQueryStop { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryStop) ErrorTrace(errortrace bool) *AsyncQueryStop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryStop) FilterPath(filterpaths ...string) *AsyncQueryStop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryStop) Human(human bool) *AsyncQueryStop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryStop) Pretty(pretty bool) *AsyncQueryStop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncquerystop/response.go b/typedapi/esql/asyncquerystop/response.go new file mode 100644 index 0000000000..7b0e026531 --- /dev/null +++ b/typedapi/esql/asyncquerystop/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package asyncquerystop + +// Response holds the response body struct for the package asyncquerystop +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/async_query_stop/AsyncQueryStopResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/query/query.go b/typedapi/esql/query/query.go index de4a4be86c..43d550192f 100644 --- a/typedapi/esql/query/query.go +++ b/typedapi/esql/query/query.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Executes an ES|QL request +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. 
package query import ( @@ -74,7 +75,8 @@ func NewQueryFunc(tp elastictransport.Interface) NewQuery { } } -// Executes an ES|QL request +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html func New(tp elastictransport.Interface) *Query { @@ -84,8 +86,6 @@ func New(tp elastictransport.Interface) *Query { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,46 +363,83 @@ func (r *Query) Pretty(pretty bool) *Query { return r } -// Columnar By default, ES|QL returns results as rows. For example, FROM returns each +// By default, ES|QL returns results as rows. For example, FROM returns each // individual document as one row. For the JSON, YAML, CBOR and smile formats, // ES|QL can return the results in a columnar fashion where one row represents // all the values of a certain column in the results. // API name: columnar func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Columnar = &columnar return r } -// Filter Specify a Query DSL query in the filter parameter to filter the set of +// Specify a Query DSL query in the filter parameter to filter the set of // documents that an ES|QL query runs on. // API name: filter -func (r *Query) Filter(filter *types.Query) *Query { +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. 
+// API name: include_ccs_metadata +func (r *Query) IncludeCcsMetadata(includeccsmetadata bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.IncludeCcsMetadata = &includeccsmetadata return r } // API name: locale func (r *Query) Locale(locale string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Locale = &locale return r } -// Params To avoid any attempts of hacking or code injection, extract the values in a +// To avoid any attempts of hacking or code injection, extract the values in a // separate list of parameters. Use question mark placeholders (?) in the query // string for each of the parameters. // API name: params -func (r *Query) Params(params ...types.FieldValue) *Query { - r.req.Params = params +func (r *Query) Params(params ...types.FieldValueVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + } return r } -// Profile If provided and `true` the response will include an extra `profile` object +// If provided and `true` the response will include an extra `profile` object // with information on how the query was executed. This information is for human // debugging // and its format can change at any time but it can give some insight into the @@ -410,27 +447,38 @@ func (r *Query) Params(params ...types.FieldValue) *Query { // of each part of the query. 
// API name: profile func (r *Query) Profile(profile bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query The ES|QL query API accepts an ES|QL query string in the query parameter, +// The ES|QL query API accepts an ES|QL query string in the query parameter, // runs it, and returns the results. // API name: query func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query return r } -// Tables Tables to use with the LOOKUP operation. The top level key is the table +// Tables to use with the LOOKUP operation. The top level key is the table // name and the next level key is the column name. // API name: tables func (r *Query) Tables(tables map[string]map[string]types.TableValuesContainer) *Query { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Tables = tables - return r } diff --git a/typedapi/esql/query/request.go b/typedapi/esql/query/request.go index f97fdff2d8..6835ce7bc2 100644 --- a/typedapi/esql/query/request.go +++ b/typedapi/esql/query/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package query @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/query/QueryRequest.ts#L26-L89 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/query/QueryRequest.ts#L27-L105 type Request struct { // Columnar By default, ES|QL returns results as rows. For example, FROM returns each @@ -40,7 +40,13 @@ type Request struct { // Filter Specify a Query DSL query in the filter parameter to filter the set of // documents that an ES|QL query runs on. Filter *types.Query `json:"filter,omitempty"` - Locale *string `json:"locale,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + Locale *string `json:"locale,omitempty"` // Params To avoid any attempts of hacking or code injection, extract the values in a // separate list of parameters. Use question mark placeholders (?) in the query // string for each of the parameters. diff --git a/typedapi/esql/query/response.go b/typedapi/esql/query/response.go index 751b2854e3..4c0ce6e397 100644 --- a/typedapi/esql/query/response.go +++ b/typedapi/esql/query/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package query // Response holds the response body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/query/QueryResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/query/QueryResponse.ts#L22-L25 type Response = []byte diff --git a/typedapi/features/getfeatures/get_features.go b/typedapi/features/getfeatures/get_features.go index 1b7f29ec21..b30a8036ad 100644 --- a/typedapi/features/getfeatures/get_features.go +++ b/typedapi/features/getfeatures/get_features.go @@ -16,10 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Gets a list of features which can be included in snapshots using the -// feature_states field when creating a snapshot +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. 
+// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. package getfeatures import ( @@ -69,10 +85,26 @@ func NewGetFeaturesFunc(tp elastictransport.Interface) NewGetFeatures { } } -// Gets a list of features which can be included in snapshots using the -// feature_states field when creating a snapshot +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. +// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features func New(tp elastictransport.Interface) *GetFeatures { r := &GetFeatures{ transport: tp, @@ -276,6 +308,14 @@ func (r *GetFeatures) Header(key, value string) *GetFeatures { return r } +// MasterTimeout Period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *GetFeatures) MasterTimeout(duration string) *GetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/features/getfeatures/response.go b/typedapi/features/getfeatures/response.go index a91ff43aff..599003b62d 100644 --- a/typedapi/features/getfeatures/response.go +++ b/typedapi/features/getfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` } diff --git a/typedapi/features/resetfeatures/reset_features.go b/typedapi/features/resetfeatures/reset_features.go index e06e737ae8..19a87c5b4d 100644 --- a/typedapi/features/resetfeatures/reset_features.go +++ b/typedapi/features/resetfeatures/reset_features.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Resets the internal state of features, usually by deleting system indices +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. +// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. +// +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. package resetfeatures import ( @@ -68,9 +92,33 @@ func NewResetFeaturesFunc(tp elastictransport.Interface) NewResetFeatures { } } -// Resets the internal state of features, usually by deleting system indices +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. 
+// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features func New(tp elastictransport.Interface) *ResetFeatures { r := &ResetFeatures{ transport: tp, @@ -276,6 +324,14 @@ func (r *ResetFeatures) Header(key, value string) *ResetFeatures { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResetFeatures) MasterTimeout(duration string) *ResetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/features/resetfeatures/response.go b/typedapi/features/resetfeatures/response.go index 46c3817d3e..909101bac2 100644 --- a/typedapi/features/resetfeatures/response.go +++ b/typedapi/features/resetfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resetfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resetfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` } diff --git a/typedapi/fleet/globalcheckpoints/global_checkpoints.go b/typedapi/fleet/globalcheckpoints/global_checkpoints.go index 30ab753778..de3ed98cf5 100644 --- a/typedapi/fleet/globalcheckpoints/global_checkpoints.go +++ b/typedapi/fleet/globalcheckpoints/global_checkpoints.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the current global checkpoints for an index. This API is design for -// internal use by the fleet server project. +// Get global checkpoints. +// +// Get the current global checkpoints for an index. +// This API is designed for internal use by the Fleet server project. package globalcheckpoints import ( @@ -77,10 +79,12 @@ func NewGlobalCheckpointsFunc(tp elastictransport.Interface) NewGlobalCheckpoint } } -// Returns the current global checkpoints for an index. This API is design for -// internal use by the fleet server project. +// Get global checkpoints. +// +// Get the current global checkpoints for an index. 
+// This API is designed for internal use by the Fleet server project. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet func New(tp elastictransport.Interface) *GlobalCheckpoints { r := &GlobalCheckpoints{ transport: tp, diff --git a/typedapi/fleet/globalcheckpoints/response.go b/typedapi/fleet/globalcheckpoints/response.go index 0a53ef52d4..01494ecb47 100644 --- a/typedapi/fleet/globalcheckpoints/response.go +++ b/typedapi/fleet/globalcheckpoints/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package globalcheckpoints // Response holds the response body struct for the package globalcheckpoints // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 type Response struct { GlobalCheckpoints []int64 `json:"global_checkpoints"` TimedOut bool `json:"timed_out"` diff --git a/typedapi/fleet/msearch/msearch.go b/typedapi/fleet/msearch/msearch.go index c1037f44ed..e2586122b9 100644 --- a/typedapi/fleet/msearch/msearch.go +++ b/typedapi/fleet/msearch/msearch.go @@ -16,15 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Executes several [fleet -// searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) -// with a single API request. -// The API follows the same structure as the [multi -// search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) -// API. However, similar to the fleet search API, it -// supports the wait_for_checkpoints parameter. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Run multiple Fleet searches. +// Run several Fleet searches with a single API request. +// The API follows the same structure as the multi search API. +// However, similar to the Fleet search API, it supports the +// `wait_for_checkpoints` parameter. package msearch import ( @@ -87,13 +85,13 @@ func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { } } -// Executes several [fleet -// searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) -// with a single API request. -// The API follows the same structure as the [multi -// search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) -// API. However, similar to the fleet search API, it -// supports the wait_for_checkpoints parameter. +// Run multiple Fleet searches. +// Run several Fleet searches with a single API request. +// The API follows the same structure as the multi search API. +// However, similar to the Fleet search API, it supports the +// `wait_for_checkpoints` parameter. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch func New(tp elastictransport.Interface) *Msearch { r := &Msearch{ transport: tp, diff --git a/typedapi/fleet/msearch/request.go b/typedapi/fleet/msearch/request.go index 79c9fcbc27..73fac99a49 100644 --- a/typedapi/fleet/msearch/request.go +++ b/typedapi/fleet/msearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearch @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/fleet/msearch/MultiSearchRequest.ts#L31-L114 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/fleet/msearch/MultiSearchRequest.ts#L31-L126 type Request = []types.MsearchRequestItem diff --git a/typedapi/fleet/msearch/response.go b/typedapi/fleet/msearch/response.go index 55c2d8e8c2..f22f730358 100644 --- a/typedapi/fleet/msearch/response.go +++ b/typedapi/fleet/msearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package msearch @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 type Response struct { Docs []types.MsearchResponseItem `json:"docs"` } diff --git a/typedapi/fleet/postsecret/post_secret.go b/typedapi/fleet/postsecret/post_secret.go index 200fe7f75e..cb73207b4a 100644 --- a/typedapi/fleet/postsecret/post_secret.go +++ b/typedapi/fleet/postsecret/post_secret.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Creates a secret stored by Fleet. package postsecret diff --git a/typedapi/fleet/search/request.go b/typedapi/fleet/search/request.go index c1e0dc7a2d..ae0b41b686 100644 --- a/typedapi/fleet/search/request.go +++ b/typedapi/fleet/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/fleet/search/SearchRequest.ts#L55-L260 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/fleet/search/SearchRequest.ts#L54-L266 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` diff --git a/typedapi/fleet/search/response.go b/typedapi/fleet/search/response.go index a11292115e..b7307719b9 100644 --- a/typedapi/fleet/search/response.go +++ b/typedapi/fleet/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/fleet/search/SearchResponse.ts#L33-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/fleet/search/SearchResponse.ts#L33-L50 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` diff --git a/typedapi/fleet/search/search.go b/typedapi/fleet/search/search.go index 7601b39f68..6533672a54 100644 --- a/typedapi/fleet/search/search.go +++ b/typedapi/fleet/search/search.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// The purpose of the fleet search api is to provide a search api where the -// search will only be executed -// after provided checkpoint has been processed and is visible for searches +// Run a Fleet search. +// The purpose of the Fleet search API is to provide an API where the search +// will be run only +// after the provided checkpoint has been processed and is visible for searches // inside of Elasticsearch. 
package search @@ -88,10 +89,13 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { } } -// The purpose of the fleet search api is to provide a search api where the -// search will only be executed -// after provided checkpoint has been processed and is visible for searches +// Run a Fleet search. +// The purpose of the Fleet search API is to provide an API where the search +// will be run only +// after the provided checkpoint has been processed and is visible for searches // inside of Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search func New(tp elastictransport.Interface) *Search { r := &Search{ transport: tp, @@ -99,8 +103,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -414,13 +416,6 @@ func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) * return r } -// API name: min_compatible_shard_node -func (r *Search) MinCompatibleShardNode(versionstring string) *Search { - r.values.Set("min_compatible_shard_node", versionstring) - - return r -} - // API name: preference func (r *Search) Preference(preference string) *Search { r.values.Set("preference", preference) @@ -604,213 +599,386 @@ func (r *Search) Pretty(pretty bool) *Search { // API name: aggregations func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = 
make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + r.req.Aggregations = tmp return r } // API name: collapse -func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { +func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// Array of wildcard (*) patterns. The request returns doc values for field // names matching these patterns in the hits.fields property of the response. // API name: docvalue_fields -func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { - r.req.DocvalueFields = docvaluefields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Explain If true, returns detailed information about score computation as part of a +// If true, returns detailed information about score computation as part of a // hit. // API name: explain func (r *Search) Explain(explain bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. 
// API name: ext func (r *Search) Ext(ext map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} + +func (r *Search) AddExt(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + + tmp[key] = value + + r.req.Ext = tmp return r } -// Fields Array of wildcard (*) patterns. The request returns values for field names +// Array of wildcard (*) patterns. The request returns values for field names // matching these patterns in the hits.fields property of the response. // API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + + } return r } -// From Starting document offset. By default, you cannot page through more than +// Starting document offset. By default, you cannot page through more than // 10,000 // hits using the from and size parameters. To page through more hits, use the // search_after parameter. 
// API name: from func (r *Search) From(from int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } // API name: highlight -func (r *Search) Highlight(highlight *types.Highlight) *Search { +func (r *Search) Highlight(highlight types.HighlightVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boosts the _score of documents from specified indices. // API name: indices_boost -func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { - r.req.IndicesBoost = indicesboosts +func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// MinScore Minimum _score for matching documents. Documents with a lower _score are +// Minimum _score for matching documents. Documents with a lower _score are // not included in the search results. // API name: min_score func (r *Search) MinScore(minscore types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// Limits the search to a point in time (PIT). If you provide a PIT, you // cannot specify an in the request path. 
// API name: pit -func (r *Search) Pit(pit *types.PointInTimeReference) *Search { +func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } // API name: post_filter -func (r *Search) PostFilter(postfilter *types.Query) *Search { +func (r *Search) PostFilter(postfilter types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } // API name: profile func (r *Search) Profile(profile bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. +// Defines the search definition using the Query DSL. // API name: query -func (r *Search) Query(query *types.Query) *Search { +func (r *Search) Query(query types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } // API name: rescore -func (r *Search) Rescore(rescores ...types.Rescore) *Search { - r.req.Rescore = rescores +func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. 
// API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. // API name: script_fields func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } // API name: search_after -func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { - r.req.SearchAfter = sortresults +func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// If true, returns sequence number and primary term of the last modification // of each hit. See Optimistic concurrency control. 
// API name: seq_no_primary_term func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. By default, you cannot page through more +// The number of hits to return. By default, you cannot page through more // than 10,000 hits using the from and size parameters. To page through more // hits, use the search_after parameter. // API name: size func (r *Search) Size(size int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } // API name: slice -func (r *Search) Slice(slice *types.SlicedScroll) *Search { +func (r *Search) Slice(slice types.SlicedScrollVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } // API name: sort -func (r *Search) Sort(sorts ...types.SortCombinations) *Search { - r.req.Sort = sorts +func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. These +// Indicates which source fields are returned for matching documents. These // fields are returned in the hits._source property of the search response. 
// API name: _source -func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { - r.req.Source_ = sourceconfig +func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. Each group maintains a statistics +// Stats groups to associate with the search. Each group maintains a statistics // aggregation for its associated searches. You can retrieve these stats using // the indices stats API. // API name: stats func (r *Search) Stats(stats ...string) *Search { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// List of stored fields to return as part of a hit. If no fields are specified, // no stored fields are included in the response. If this field is specified, // the _source // parameter defaults to false. You can pass _source: true to return both source @@ -818,20 +986,29 @@ func (r *Search) Stats(stats ...string) *Search { // and stored fields in the search response. 
// API name: stored_fields func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } // API name: suggest -func (r *Search) Suggest(suggest *types.Suggester) *Search { +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// Maximum number of documents to collect for each shard. If a query reaches // this // limit, Elasticsearch terminates the query early. Elasticsearch collects // documents @@ -839,48 +1016,71 @@ func (r *Search) Suggest(suggest *types.Suggester) *Search { // early. // API name: terminate_after func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. If no +// Specifies the period of time to wait for a response from each shard. If no // response // is received before the timeout expires, the request fails and returns an // error. // Defaults to no timeout. // API name: timeout func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If true, calculate and return document scores, even if the scores are not // used for sorting. 
// API name: track_scores func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// Number of hits matching the query to count accurately. If true, the exact // number of hits is returned at the cost of some performance. If false, the // response does not include the total number of hits matching the query. // Defaults to 10,000 hits. // API name: track_total_hits -func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { - r.req.TrackTotalHits = trackhits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. +// If true, returns document version as part of a hit. // API name: version func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/graph/explore/explore.go b/typedapi/graph/explore/explore.go index ccb7e5ff73..863bf6e0fa 100644 --- a/typedapi/graph/explore/explore.go +++ b/typedapi/graph/explore/explore.go @@ -16,10 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Extracts and summarizes information about the documents and terms in an +// Explore graph analytics. 
+// Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. +// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. package explore import ( @@ -82,10 +91,19 @@ func NewExploreFunc(tp elastictransport.Interface) NewExplore { } } -// Extracts and summarizes information about the documents and terms in an +// Explore graph analytics. +// Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. +// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph func New(tp elastictransport.Interface) *Explore { r := &Explore{ transport: tp, @@ -93,8 +111,6 @@ func New(tp elastictransport.Interface) *Explore { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -380,40 +396,59 @@ func (r *Explore) Pretty(pretty bool) *Explore { return r } -// Connections Specifies or more fields from which you want to extract terms that are +// Specifies or more fields from which you want to extract terms that are // associated with the specified vertices. // API name: connections -func (r *Explore) Connections(connections *types.Hop) *Explore { +func (r *Explore) Connections(connections types.HopVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Connections = connections + r.req.Connections = connections.HopCaster() return r } -// Controls Direct the Graph API how to build the graph. +// Direct the Graph API how to build the graph. // API name: controls -func (r *Explore) Controls(controls *types.ExploreControls) *Explore { +func (r *Explore) Controls(controls types.ExploreControlsVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Controls = controls + r.req.Controls = controls.ExploreControlsCaster() return r } -// Query A seed query that identifies the documents of interest. Can be any valid +// A seed query that identifies the documents of interest. Can be any valid // Elasticsearch query. 
// API name: query -func (r *Explore) Query(query *types.Query) *Explore { +func (r *Explore) Query(query types.QueryVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Vertices Specifies one or more fields that contain the terms you want to include in +// Specifies one or more fields that contain the terms you want to include in // the graph as vertices. // API name: vertices -func (r *Explore) Vertices(vertices ...types.VertexDefinition) *Explore { - r.req.Vertices = vertices +func (r *Explore) Vertices(vertices ...types.VertexDefinitionVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range vertices { + + r.req.Vertices = append(r.req.Vertices, *v.VertexDefinitionCaster()) + } return r } diff --git a/typedapi/graph/explore/request.go b/typedapi/graph/explore/request.go index 96e4871d30..86a272783f 100644 --- a/typedapi/graph/explore/request.go +++ b/typedapi/graph/explore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explore @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/explore/GraphExploreRequest.ts#L28-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/explore/GraphExploreRequest.ts#L28-L84 type Request struct { // Connections Specifies or more fields from which you want to extract terms that are diff --git a/typedapi/graph/explore/response.go b/typedapi/graph/explore/response.go index b6ab69ef50..8260092ee5 100644 --- a/typedapi/graph/explore/response.go +++ b/typedapi/graph/explore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explore @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/explore/GraphExploreResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/explore/GraphExploreResponse.ts#L25-L33 type Response struct { Connections []types.Connection `json:"connections"` Failures []types.ShardFailure `json:"failures"` diff --git a/typedapi/ilm/deletelifecycle/delete_lifecycle.go b/typedapi/ilm/deletelifecycle/delete_lifecycle.go index 68c6685397..13e2a12fdd 100644 --- a/typedapi/ilm/deletelifecycle/delete_lifecycle.go +++ b/typedapi/ilm/deletelifecycle/delete_lifecycle.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes the specified lifecycle policy definition. You cannot delete policies -// that are currently in use. If the policy is being used to manage any indices, -// the request fails and returns an error. +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. package deletelifecycle import ( @@ -78,11 +78,11 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { } } -// Deletes the specified lifecycle policy definition. You cannot delete policies -// that are currently in use. 
If the policy is being used to manage any indices, -// the request fails and returns an error. +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle func New(tp elastictransport.Interface) *DeleteLifecycle { r := &DeleteLifecycle{ transport: tp, diff --git a/typedapi/ilm/deletelifecycle/response.go b/typedapi/ilm/deletelifecycle/response.go index bb629ca050..0689096d76 100644 --- a/typedapi/ilm/deletelifecycle/response.go +++ b/typedapi/ilm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/explainlifecycle/explain_lifecycle.go b/typedapi/ilm/explainlifecycle/explain_lifecycle.go index 789889cbd3..98b96cef8d 100644 --- a/typedapi/ilm/explainlifecycle/explain_lifecycle.go +++ b/typedapi/ilm/explainlifecycle/explain_lifecycle.go @@ -16,12 +16,15 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves information about the index’s current lifecycle state, such as the -// currently executing phase, action, and step. Shows when the index entered -// each one, the definition of the running phase, and information about any -// failures. +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. +// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. package explainlifecycle import ( @@ -79,12 +82,15 @@ func NewExplainLifecycleFunc(tp elastictransport.Interface) NewExplainLifecycle } } -// Retrieves information about the index’s current lifecycle state, such as the -// currently executing phase, action, and step. Shows when the index entered -// each one, the definition of the running phase, and information about any -// failures. +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. +// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle func New(tp elastictransport.Interface) *ExplainLifecycle { r := &ExplainLifecycle{ transport: tp, @@ -334,15 +340,6 @@ func (r *ExplainLifecycle) MasterTimeout(duration string) *ExplainLifecycle { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. -// API name: timeout -func (r *ExplainLifecycle) Timeout(duration string) *ExplainLifecycle { - r.values.Set("timeout", duration) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ilm/explainlifecycle/response.go b/typedapi/ilm/explainlifecycle/response.go index 32df15e8c2..7bb09b1b68 100644 --- a/typedapi/ilm/explainlifecycle/response.go +++ b/typedapi/ilm/explainlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explainlifecycle @@ -24,6 +24,7 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -31,7 +32,7 @@ import ( // Response holds the response body struct for the package explainlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 type Response struct { Indices map[string]types.LifecycleExplain `json:"indices"` } @@ -75,19 +76,19 @@ func (s *Response) UnmarshalJSON(data []byte) error { case true: oo := types.NewLifecycleExplainManaged() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Indices | %w", err) } s.Indices[key] = oo case false: oo := types.NewLifecycleExplainUnmanaged() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Indices | %w", err) } s.Indices[key] = oo default: oo := new(types.LifecycleExplain) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(types.LifecycleExplain) | %w", err) } s.Indices[key] = oo } diff --git a/typedapi/ilm/getlifecycle/get_lifecycle.go b/typedapi/ilm/getlifecycle/get_lifecycle.go index dba7dc9b08..aad2599248 100644 --- a/typedapi/ilm/getlifecycle/get_lifecycle.go +++ b/typedapi/ilm/getlifecycle/get_lifecycle.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves a lifecycle policy. +// Get lifecycle policies. package getlifecycle import ( @@ -74,9 +74,9 @@ func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { } } -// Retrieves a lifecycle policy. +// Get lifecycle policies. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle func New(tp elastictransport.Interface) *GetLifecycle { r := &GetLifecycle{ transport: tp, diff --git a/typedapi/ilm/getlifecycle/response.go b/typedapi/ilm/getlifecycle/response.go index 6eb880be11..cc52bb0bd8 100644 --- a/typedapi/ilm/getlifecycle/response.go +++ b/typedapi/ilm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26 type Response map[string]types.Lifecycle diff --git a/typedapi/ilm/getstatus/get_status.go b/typedapi/ilm/getstatus/get_status.go index 46821539ee..a8b1aa7bc4 100644 --- a/typedapi/ilm/getstatus/get_status.go +++ b/typedapi/ilm/getstatus/get_status.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves the current index lifecycle management (ILM) status. +// Get the ILM status. +// +// Get the current index lifecycle management status. package getstatus import ( @@ -68,9 +70,11 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Retrieves the current index lifecycle management (ILM) status. +// Get the ILM status. +// +// Get the current index lifecycle management status. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status func New(tp elastictransport.Interface) *GetStatus { r := &GetStatus{ transport: tp, diff --git a/typedapi/ilm/getstatus/response.go b/typedapi/ilm/getstatus/response.go index 493c076b76..f950ddf174 100644 --- a/typedapi/ilm/getstatus/response.go +++ b/typedapi/ilm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` } diff --git a/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go b/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go index dfab99e48c..34743fd951 100644 --- a/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go +++ b/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go @@ -16,14 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Switches the indices, ILM policies, and legacy, composable and component -// templates from using custom node attributes and -// attribute-based allocation filters to using data tiers, and optionally -// deletes one legacy index template.+ +// Migrate to data tiers routing. +// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. +// +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. package migratetodatatiers import ( @@ -78,14 +92,28 @@ func NewMigrateToDataTiersFunc(tp elastictransport.Interface) NewMigrateToDataTi } } -// Switches the indices, ILM policies, and legacy, composable and component -// templates from using custom node attributes and -// attribute-based allocation filters to using data tiers, and optionally -// deletes one legacy index template.+ +// Migrate to data tiers routing. 
+// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers func New(tp elastictransport.Interface) *MigrateToDataTiers { r := &MigrateToDataTiers{ transport: tp, @@ -93,8 +121,6 @@ func New(tp elastictransport.Interface) *MigrateToDataTiers { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -313,6 +339,17 @@ func (r *MigrateToDataTiers) DryRun(dryrun bool) *MigrateToDataTiers { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. 
+// API name: master_timeout +func (r *MigrateToDataTiers) MasterTimeout(duration string) *MigrateToDataTiers { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -359,6 +396,10 @@ func (r *MigrateToDataTiers) Pretty(pretty bool) *MigrateToDataTiers { // API name: legacy_template_to_delete func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete string) *MigrateToDataTiers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LegacyTemplateToDelete = &legacytemplatetodelete @@ -367,6 +408,10 @@ func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete strin // API name: node_attribute func (r *MigrateToDataTiers) NodeAttribute(nodeattribute string) *MigrateToDataTiers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.NodeAttribute = &nodeattribute diff --git a/typedapi/ilm/migratetodatatiers/request.go b/typedapi/ilm/migratetodatatiers/request.go index 9de320b907..884eecf746 100644 --- a/typedapi/ilm/migratetodatatiers/request.go +++ b/typedapi/ilm/migratetodatatiers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package migratetodatatiers @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/migrate_to_data_tiers/Request.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/migrate_to_data_tiers/Request.ts#L23-L69 type Request struct { LegacyTemplateToDelete *string `json:"legacy_template_to_delete,omitempty"` NodeAttribute *string `json:"node_attribute,omitempty"` diff --git a/typedapi/ilm/migratetodatatiers/response.go b/typedapi/ilm/migratetodatatiers/response.go index 63012ac379..b16fb97eb7 100644 --- a/typedapi/ilm/migratetodatatiers/response.go +++ b/typedapi/ilm/migratetodatatiers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package migratetodatatiers @@ -31,15 +31,25 @@ import ( // Response holds the response body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L51 type Response struct { - DryRun bool `json:"dry_run"` - MigratedComponentTemplates []string `json:"migrated_component_templates"` + DryRun bool `json:"dry_run"` + // MigratedComponentTemplates The component templates that were updated to not contain custom routing + // settings for the provided data attribute. + MigratedComponentTemplates []string `json:"migrated_component_templates"` + // MigratedComposableTemplates The composable index templates that were updated to not contain custom + // routing settings for the provided data attribute. MigratedComposableTemplates []string `json:"migrated_composable_templates"` - MigratedIlmPolicies []string `json:"migrated_ilm_policies"` - MigratedIndices []string `json:"migrated_indices"` - MigratedLegacyTemplates []string `json:"migrated_legacy_templates"` - RemovedLegacyTemplate string `json:"removed_legacy_template"` + // MigratedIlmPolicies The ILM policies that were updated. + MigratedIlmPolicies []string `json:"migrated_ilm_policies"` + // MigratedIndices The indices that were migrated to tier preference routing. + MigratedIndices []string `json:"migrated_indices"` + // MigratedLegacyTemplates The legacy index templates that were updated to not contain custom routing + // settings for the provided data attribute. 
+ MigratedLegacyTemplates []string `json:"migrated_legacy_templates"` + // RemovedLegacyTemplate The name of the legacy index template that was deleted. + // This information is missing if no legacy index templates were deleted. + RemovedLegacyTemplate string `json:"removed_legacy_template"` } // NewResponse returns a Response diff --git a/typedapi/ilm/movetostep/move_to_step.go b/typedapi/ilm/movetostep/move_to_step.go index a05ad2aaa2..be086098da 100644 --- a/typedapi/ilm/movetostep/move_to_step.go +++ b/typedapi/ilm/movetostep/move_to_step.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Manually moves an index into the specified step and executes that step. +// Move to a lifecycle step. +// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. +// +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. 
+// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. package movetostep import ( @@ -81,9 +104,32 @@ func NewMoveToStepFunc(tp elastictransport.Interface) NewMoveToStep { } } -// Manually moves an index into the specified step and executes that step. +// Move to a lifecycle step. +// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. +// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step func New(tp elastictransport.Interface) *MoveToStep { r := &MoveToStep{ transport: tp, @@ -91,8 +137,6 @@ func New(tp elastictransport.Interface) *MoveToStep { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,18 +403,28 @@ func (r *MoveToStep) Pretty(pretty bool) *MoveToStep { return r } +// The step that the index is expected to be in. // API name: current_step -func (r *MoveToStep) CurrentStep(currentstep *types.StepKey) *MoveToStep { +func (r *MoveToStep) CurrentStep(currentstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.CurrentStep = *currentstep + r.req.CurrentStep = *currentstep.StepKeyCaster() return r } +// The step that you want to run. // API name: next_step -func (r *MoveToStep) NextStep(nextstep *types.StepKey) *MoveToStep { +func (r *MoveToStep) NextStep(nextstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.NextStep = *nextstep + r.req.NextStep = *nextstep.StepKeyCaster() return r } diff --git a/typedapi/ilm/movetostep/request.go b/typedapi/ilm/movetostep/request.go index 1aefa7315e..bf486d7b40 100644 --- a/typedapi/ilm/movetostep/request.go +++ b/typedapi/ilm/movetostep/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package movetostep @@ -29,10 +29,13 @@ import ( // Request holds the request body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L64 type Request struct { + + // CurrentStep The step that the index is expected to be in. CurrentStep types.StepKey `json:"current_step"` - NextStep types.StepKey `json:"next_step"` + // NextStep The step that you want to run. + NextStep types.StepKey `json:"next_step"` } // NewRequest returns a Request diff --git a/typedapi/ilm/movetostep/response.go b/typedapi/ilm/movetostep/response.go index 4edb264503..d573139e3b 100644 --- a/typedapi/ilm/movetostep/response.go +++ b/typedapi/ilm/movetostep/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package movetostep // Response holds the response body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/putlifecycle/put_lifecycle.go b/typedapi/ilm/putlifecycle/put_lifecycle.go index 312b3d23ad..244399dd8e 100644 --- a/typedapi/ilm/putlifecycle/put_lifecycle.go +++ b/typedapi/ilm/putlifecycle/put_lifecycle.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates a lifecycle policy. If the specified policy exists, the policy is -// replaced and the policy version is incremented. +// Create or update a lifecycle policy. +// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. package putlifecycle import ( @@ -82,10 +86,14 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { } } -// Creates a lifecycle policy. If the specified policy exists, the policy is -// replaced and the policy version is incremented. +// Create or update a lifecycle policy. 
+// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle func New(tp elastictransport.Interface) *PutLifecycle { r := &PutLifecycle{ transport: tp, @@ -93,8 +101,6 @@ func New(tp elastictransport.Interface) *PutLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/ilm/putlifecycle/request.go b/typedapi/ilm/putlifecycle/request.go index 54f28d9fde..9a468070e2 100644 --- a/typedapi/ilm/putlifecycle/request.go +++ b/typedapi/ilm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putlifecycle @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L66 type Request struct { Policy *types.IlmPolicy `json:"policy,omitempty"` } diff --git a/typedapi/ilm/putlifecycle/response.go b/typedapi/ilm/putlifecycle/response.go index 98020e48a7..9c6f24aff0 100644 --- a/typedapi/ilm/putlifecycle/response.go +++ b/typedapi/ilm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ilm/removepolicy/remove_policy.go b/typedapi/ilm/removepolicy/remove_policy.go index 1d98f1f3dd..912d501325 100644 --- a/typedapi/ilm/removepolicy/remove_policy.go +++ b/typedapi/ilm/removepolicy/remove_policy.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Removes the assigned lifecycle policy and stops managing the specified index +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. package removepolicy import ( @@ -76,9 +79,12 @@ func NewRemovePolicyFunc(tp elastictransport.Interface) NewRemovePolicy { } } -// Removes the assigned lifecycle policy and stops managing the specified index +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy func New(tp elastictransport.Interface) *RemovePolicy { r := &RemovePolicy{ transport: tp, diff --git a/typedapi/ilm/removepolicy/response.go b/typedapi/ilm/removepolicy/response.go index dd2c283cf1..09660c7832 100644 --- a/typedapi/ilm/removepolicy/response.go +++ b/typedapi/ilm/removepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package removepolicy // Response holds the response body struct for the package removepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 type Response struct { FailedIndexes []string `json:"failed_indexes"` HasFailures bool `json:"has_failures"` diff --git a/typedapi/ilm/retry/response.go b/typedapi/ilm/retry/response.go index 7f4214427a..f2d4b5a071 100644 --- a/typedapi/ilm/retry/response.go +++ b/typedapi/ilm/retry/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package retry // Response holds the response body struct for the package retry // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/retry/retry.go b/typedapi/ilm/retry/retry.go index 8600a34dec..4c0d84ba86 100644 --- a/typedapi/ilm/retry/retry.go +++ b/typedapi/ilm/retry/retry.go @@ -16,9 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Retries executing the policy for an index that is in the ERROR step. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. +// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. package retry import ( @@ -76,9 +81,14 @@ func NewRetryFunc(tp elastictransport.Interface) NewRetry { } } -// Retries executing the policy for an index that is in the ERROR step. +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. +// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry func New(tp elastictransport.Interface) *Retry { r := &Retry{ transport: tp, diff --git a/typedapi/ilm/start/response.go b/typedapi/ilm/start/response.go index 5b5de48285..d2700c856a 100644 --- a/typedapi/ilm/start/response.go +++ b/typedapi/ilm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/start/StartIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/start/StartIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/start/start.go b/typedapi/ilm/start/start.go index a795df5e43..cc436758db 100644 --- a/typedapi/ilm/start/start.go +++ b/typedapi/ilm/start/start.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Start the index lifecycle management (ILM) plugin. +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. +// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. package start import ( @@ -68,9 +72,13 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Start the index lifecycle management (ILM) plugin. +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. +// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start func New(tp elastictransport.Interface) *Start { r := &Start{ transport: tp, @@ -276,6 +284,8 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. // API name: master_timeout func (r *Start) MasterTimeout(duration string) *Start { r.values.Set("master_timeout", duration) @@ -283,6 +293,8 @@ func (r *Start) MasterTimeout(duration string) *Start { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. // API name: timeout func (r *Start) Timeout(duration string) *Start { r.values.Set("timeout", duration) diff --git a/typedapi/ilm/stop/response.go b/typedapi/ilm/stop/response.go index 5e2d541c4b..68ae1eaf8b 100644 --- a/typedapi/ilm/stop/response.go +++ b/typedapi/ilm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/stop/StopIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/stop/StopIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ilm/stop/stop.go b/typedapi/ilm/stop/stop.go index b71fe32027..da5150c5c9 100644 --- a/typedapi/ilm/stop/stop.go +++ b/typedapi/ilm/stop/stop.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Halts all lifecycle management operations and stops the index lifecycle -// management (ILM) plugin +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. package stop import ( @@ -69,10 +77,18 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Halts all lifecycle management operations and stops the index lifecycle -// management (ILM) plugin +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop func New(tp elastictransport.Interface) *Stop { r := &Stop{ transport: tp, @@ -278,6 +294,8 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. // API name: master_timeout func (r *Stop) MasterTimeout(duration string) *Stop { r.values.Set("master_timeout", duration) @@ -285,6 +303,8 @@ func (r *Stop) MasterTimeout(duration string) *Stop { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. // API name: timeout func (r *Stop) Timeout(duration string) *Stop { r.values.Set("timeout", duration) diff --git a/typedapi/indices/addblock/add_block.go b/typedapi/indices/addblock/add_block.go index a3627d3593..2620b85163 100644 --- a/typedapi/indices/addblock/add_block.go +++ b/typedapi/indices/addblock/add_block.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Add an index block. -// Limits the operations allowed on an index by blocking specific operation -// types. +// +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. package addblock import ( @@ -85,10 +87,12 @@ func NewAddBlockFunc(tp elastictransport.Interface) NewAddBlock { } // Add an index block. -// Limits the operations allowed on an index by blocking specific operation -// types. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block func New(tp elastictransport.Interface) *AddBlock { r := &AddBlock{ transport: tp, @@ -304,7 +308,14 @@ func (r *AddBlock) Header(key, value string) *AddBlock { return r } -// Index A comma separated list of indices to add a block to +// Index A comma-separated list or wildcard expression of index names used to limit +// the request. +// By default, you must explicitly name the indices you are adding blocks to. +// To allow the adding of blocks to indices with `_all`, `*`, or other wildcard +// expressions, change the `action.destructive_requires_name` setting to +// `false`. +// You can update this setting in the `elasticsearch.yml` file or by using the +// cluster update settings API. // API Name: index func (r *AddBlock) _index(index string) *AddBlock { r.paramSet |= indexMask @@ -313,7 +324,7 @@ func (r *AddBlock) _index(index string) *AddBlock { return r } -// Block The block to add (one of read, write, read_only or metadata) +// Block The block type to add to the index. // API Name: block func (r *AddBlock) _block(block string) *AddBlock { r.paramSet |= blockMask @@ -322,8 +333,11 @@ func (r *AddBlock) _block(block string) *AddBlock { return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
+// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -331,8 +345,10 @@ func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *AddBlock { tmp := []string{} @@ -344,8 +360,8 @@ func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildc return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) @@ -353,7 +369,10 @@ func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. 
// API name: master_timeout func (r *AddBlock) MasterTimeout(duration string) *AddBlock { r.values.Set("master_timeout", duration) @@ -361,7 +380,12 @@ func (r *AddBlock) MasterTimeout(duration string) *AddBlock { return r } -// Timeout Explicit operation timeout +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: timeout func (r *AddBlock) Timeout(duration string) *AddBlock { r.values.Set("timeout", duration) diff --git a/typedapi/indices/addblock/response.go b/typedapi/indices/addblock/response.go index 8e6dff6c86..2d36561323 100644 --- a/typedapi/indices/addblock/response.go +++ b/typedapi/indices/addblock/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package addblock @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package addblock // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Indices []types.IndicesBlockStatus `json:"indices"` diff --git a/typedapi/indices/analyze/analyze.go b/typedapi/indices/analyze/analyze.go index 60033a34cd..74017fb300 100644 --- a/typedapi/indices/analyze/analyze.go +++ b/typedapi/indices/analyze/analyze.go @@ -16,12 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get tokens from text analysis. -// The analyze API performs -// [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) -// on a text string and returns the resulting tokens. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. +// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. 
package analyze import ( @@ -83,11 +89,17 @@ func NewAnalyzeFunc(tp elastictransport.Interface) NewAnalyze { } // Get tokens from text analysis. -// The analyze API performs -// [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) -// on a text string and returns the resulting tokens. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. +// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze func New(tp elastictransport.Interface) *Analyze { r := &Analyze{ transport: tp, @@ -95,8 +107,6 @@ func New(tp elastictransport.Interface) *Analyze { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -369,83 +379,132 @@ func (r *Analyze) Pretty(pretty bool) *Analyze { return r } -// Analyzer The name of the analyzer that should be applied to the provided `text`. +// The name of the analyzer that should be applied to the provided `text`. // This could be a built-in analyzer, or an analyzer that’s been configured in // the index. 
// API name: analyzer func (r *Analyze) Analyzer(analyzer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Analyzer = &analyzer return r } -// Attributes Array of token attributes used to filter the output of the `explain` +// Array of token attributes used to filter the output of the `explain` // parameter. // API name: attributes func (r *Analyze) Attributes(attributes ...string) *Analyze { - r.req.Attributes = attributes + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range attributes { + + r.req.Attributes = append(r.req.Attributes, v) + } return r } -// CharFilter Array of character filters used to preprocess characters before the +// Array of character filters used to preprocess characters before the // tokenizer. // API name: char_filter -func (r *Analyze) CharFilter(charfilters ...types.CharFilter) *Analyze { - r.req.CharFilter = charfilters +func (r *Analyze) CharFilter(charfilters ...types.CharFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range charfilters { + + r.req.CharFilter = append(r.req.CharFilter, *v.CharFilterCaster()) + } return r } -// Explain If `true`, the response includes token attributes and additional details. +// If `true`, the response includes token attributes and additional details. // API name: explain func (r *Analyze) Explain(explain bool) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Field Field used to derive the analyzer. +// Field used to derive the analyzer. // To use this parameter, you must specify an index. // If specified, the `analyzer` parameter overrides this value. 
// API name: field func (r *Analyze) Field(field string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Field = &field return r } -// Filter Array of token filters used to apply after the tokenizer. +// Array of token filters used to apply after the tokenizer. // API name: filter -func (r *Analyze) Filter(filters ...types.TokenFilter) *Analyze { - r.req.Filter = filters +func (r *Analyze) Filter(filters ...types.TokenFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filters { + + r.req.Filter = append(r.req.Filter, *v.TokenFilterCaster()) + } return r } -// Normalizer Normalizer to use to convert text into a single token. +// Normalizer to use to convert text into a single token. // API name: normalizer func (r *Analyze) Normalizer(normalizer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Normalizer = &normalizer return r } -// Text Text to analyze. +// Text to analyze. // If an array of strings is provided, it is analyzed as a multi-value field. // API name: text func (r *Analyze) Text(texttoanalyzes ...string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Text = texttoanalyzes return r } -// Tokenizer Tokenizer to use to convert text into tokens. +// Tokenizer to use to convert text into tokens. 
// API name: tokenizer -func (r *Analyze) Tokenizer(tokenizer types.Tokenizer) *Analyze { - r.req.Tokenizer = tokenizer +func (r *Analyze) Tokenizer(tokenizer types.TokenizerVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Tokenizer = *tokenizer.TokenizerCaster() return r } diff --git a/typedapi/indices/analyze/request.go b/typedapi/indices/analyze/request.go index d600e4134a..c5340e6239 100644 --- a/typedapi/indices/analyze/request.go +++ b/typedapi/indices/analyze/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package analyze @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L110 type Request struct { // Analyzer The name of the analyzer that should be applied to the provided `text`. 
@@ -130,37 +130,37 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "html_strip": o := types.NewHtmlStripCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "html_strip", err) } s.CharFilter = append(s.CharFilter, *o) case "mapping": o := types.NewMappingCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "mapping", err) } s.CharFilter = append(s.CharFilter, *o) case "pattern_replace": o := types.NewPatternReplaceCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.CharFilter = append(s.CharFilter, *o) case "icu_normalizer": o := types.NewIcuNormalizationCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.CharFilter = append(s.CharFilter, *o) case "kuromoji_iteration_mark": o := types.NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) } s.CharFilter = append(s.CharFilter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter = append(s.CharFilter, *o) } @@ -202,289 +202,289 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "asciifolding": o := types.NewAsciiFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "asciifolding", err) } s.Filter = append(s.Filter, *o) case "common_grams": o := types.NewCommonGramsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "common_grams", err) } s.Filter = append(s.Filter, *o) case "condition": o := types.NewConditionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "condition", err) } s.Filter = append(s.Filter, 
*o) case "delimited_payload": o := types.NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "delimited_payload", err) } s.Filter = append(s.Filter, *o) case "edge_ngram": o := types.NewEdgeNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Filter = append(s.Filter, *o) case "elision": o := types.NewElisionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "elision", err) } s.Filter = append(s.Filter, *o) case "fingerprint": o := types.NewFingerprintTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "fingerprint", err) } s.Filter = append(s.Filter, *o) case "hunspell": o := types.NewHunspellTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hunspell", err) } s.Filter = append(s.Filter, *o) case "hyphenation_decompounder": o := types.NewHyphenationDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hyphenation_decompounder", err) } s.Filter = append(s.Filter, *o) case "keep_types": o := types.NewKeepTypesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep_types", err) } s.Filter = append(s.Filter, *o) case "keep": o := types.NewKeepWordsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep", err) } s.Filter = append(s.Filter, *o) case "keyword_marker": o := types.NewKeywordMarkerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword_marker", err) } s.Filter = append(s.Filter, *o) case "kstem": o := types.NewKStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kstem", err) } s.Filter = 
append(s.Filter, *o) case "length": o := types.NewLengthTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "length", err) } s.Filter = append(s.Filter, *o) case "limit": o := types.NewLimitTokenCountTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "limit", err) } s.Filter = append(s.Filter, *o) case "lowercase": o := types.NewLowercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Filter = append(s.Filter, *o) case "multiplexer": o := types.NewMultiplexerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "multiplexer", err) } s.Filter = append(s.Filter, *o) case "ngram": o := types.NewNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Filter = append(s.Filter, *o) case "nori_part_of_speech": o := types.NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "pattern_capture": o := types.NewPatternCaptureTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_capture", err) } s.Filter = append(s.Filter, *o) case "pattern_replace": o := types.NewPatternReplaceTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.Filter = append(s.Filter, *o) case "porter_stem": o := types.NewPorterStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "porter_stem", err) } s.Filter = append(s.Filter, *o) case "predicate_token_filter": o := types.NewPredicateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", 
"predicate_token_filter", err) } s.Filter = append(s.Filter, *o) case "remove_duplicates": o := types.NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "remove_duplicates", err) } s.Filter = append(s.Filter, *o) case "reverse": o := types.NewReverseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "reverse", err) } s.Filter = append(s.Filter, *o) case "shingle": o := types.NewShingleTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shingle", err) } s.Filter = append(s.Filter, *o) case "snowball": o := types.NewSnowballTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "snowball", err) } s.Filter = append(s.Filter, *o) case "stemmer_override": o := types.NewStemmerOverrideTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer_override", err) } s.Filter = append(s.Filter, *o) case "stemmer": o := types.NewStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer", err) } s.Filter = append(s.Filter, *o) case "stop": o := types.NewStopTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stop", err) } s.Filter = append(s.Filter, *o) case "synonym_graph": o := types.NewSynonymGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym_graph", err) } s.Filter = append(s.Filter, *o) case "synonym": o := types.NewSynonymTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym", err) } s.Filter = append(s.Filter, *o) case "trim": o := types.NewTrimTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "trim", err) } s.Filter = 
append(s.Filter, *o) case "truncate": o := types.NewTruncateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "truncate", err) } s.Filter = append(s.Filter, *o) case "unique": o := types.NewUniqueTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unique", err) } s.Filter = append(s.Filter, *o) case "uppercase": o := types.NewUppercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uppercase", err) } s.Filter = append(s.Filter, *o) case "word_delimiter_graph": o := types.NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) } s.Filter = append(s.Filter, *o) case "word_delimiter": o := types.NewWordDelimiterTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter", err) } s.Filter = append(s.Filter, *o) case "kuromoji_stemmer": o := types.NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) } s.Filter = append(s.Filter, *o) case "kuromoji_readingform": o := types.NewKuromojiReadingFormTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) } s.Filter = append(s.Filter, *o) case "kuromoji_part_of_speech": o := types.NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "icu_collation": o := types.NewIcuCollationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation", err) } s.Filter = append(s.Filter, *o) case "icu_folding": o := types.NewIcuFoldingTokenFilter() if err := localDec.Decode(&o); 
err != nil { - return err + return fmt.Errorf("%s | %w", "icu_folding", err) } s.Filter = append(s.Filter, *o) case "icu_normalizer": o := types.NewIcuNormalizationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.Filter = append(s.Filter, *o) case "icu_transform": o := types.NewIcuTransformTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_transform", err) } s.Filter = append(s.Filter, *o) case "phonetic": o := types.NewPhoneticTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "phonetic", err) } s.Filter = append(s.Filter, *o) case "dictionary_decompounder": o := types.NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) } s.Filter = append(s.Filter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter = append(s.Filter, *o) } @@ -533,114 +533,114 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "char_group": o := types.NewCharGroupTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "char_group", err) } s.Tokenizer = *o case "classic": o := types.NewClassicTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "classic", err) } s.Tokenizer = *o case "edge_ngram": o := types.NewEdgeNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Tokenizer = *o case "keyword": o := types.NewKeywordTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Tokenizer = *o case "letter": o := types.NewLetterTokenizer() if err := localDec.Decode(&o); err != nil { - return err + 
return fmt.Errorf("%s | %w", "letter", err) } s.Tokenizer = *o case "lowercase": o := types.NewLowercaseTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Tokenizer = *o case "ngram": o := types.NewNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Tokenizer = *o case "path_hierarchy": o := types.NewPathHierarchyTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "path_hierarchy", err) } s.Tokenizer = *o case "pattern": o := types.NewPatternTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern", err) } s.Tokenizer = *o case "simple_pattern": o := types.NewSimplePatternTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple_pattern", err) } s.Tokenizer = *o case "simple_pattern_split": o := types.NewSimplePatternSplitTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple_pattern_split", err) } s.Tokenizer = *o case "standard": o := types.NewStandardTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "standard", err) } s.Tokenizer = *o case "thai": o := types.NewThaiTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "thai", err) } s.Tokenizer = *o case "uax_url_email": o := types.NewUaxEmailUrlTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uax_url_email", err) } s.Tokenizer = *o case "whitespace": o := types.NewWhitespaceTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "whitespace", err) } s.Tokenizer = *o case "icu_tokenizer": o := types.NewIcuTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return 
fmt.Errorf("%s | %w", "icu_tokenizer", err) } s.Tokenizer = *o case "kuromoji_tokenizer": o := types.NewKuromojiTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) } s.Tokenizer = *o case "nori_tokenizer": o := types.NewNoriTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_tokenizer", err) } s.Tokenizer = *o default: if err := localDec.Decode(&s.Tokenizer); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } } diff --git a/typedapi/indices/analyze/response.go b/typedapi/indices/analyze/response.go index 6c257da073..ba47958567 100644 --- a/typedapi/indices/analyze/response.go +++ b/typedapi/indices/analyze/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package analyze @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 type Response struct { Detail *types.AnalyzeDetail `json:"detail,omitempty"` Tokens []types.AnalyzeToken `json:"tokens,omitempty"` diff --git a/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go b/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go new file mode 100644 index 0000000000..d7acda8062 --- /dev/null +++ b/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch 
B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. +package cancelmigratereindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CancelMigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCancelMigrateReindex type alias for index. 
+type NewCancelMigrateReindex func(index string) *CancelMigrateReindex + +// NewCancelMigrateReindexFunc returns a new instance of CancelMigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCancelMigrateReindexFunc(tp elastictransport.Interface) NewCancelMigrateReindex { + return func(index string) *CancelMigrateReindex { + n := New(tp) + + n._index(index) + + return n + } +} + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *CancelMigrateReindex { + r := &CancelMigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *CancelMigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CancelMigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.cancel_migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CancelMigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cancelmigratereindex.Response +func (r CancelMigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r CancelMigrateReindex) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CancelMigrateReindex query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CancelMigrateReindex headers map. +func (r *CancelMigrateReindex) Header(key, value string) *CancelMigrateReindex { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name +// API Name: index +func (r *CancelMigrateReindex) _index(index string) *CancelMigrateReindex { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CancelMigrateReindex) ErrorTrace(errortrace bool) *CancelMigrateReindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *CancelMigrateReindex) FilterPath(filterpaths ...string) *CancelMigrateReindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CancelMigrateReindex) Human(human bool) *CancelMigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging. +// API name: pretty +func (r *CancelMigrateReindex) Pretty(pretty bool) *CancelMigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/indices/cancelmigratereindex/response.go b/typedapi/indices/cancelmigratereindex/response.go new file mode 100644 index 0000000000..097b972952 --- /dev/null +++ b/typedapi/indices/cancelmigratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package cancelmigratereindex + +// Response holds the response body struct for the package cancelmigratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/cancel_migrate_reindex/MigrateCancelReindexResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/clearcache/clear_cache.go b/typedapi/indices/clearcache/clear_cache.go index 7c4cb694ea..dcc0c73268 100644 --- a/typedapi/indices/clearcache/clear_cache.go +++ b/typedapi/indices/clearcache/clear_cache.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clears the caches of one or more indices. -// For data streams, the API clears the caches of the stream’s backing indices. +// Clear the cache. +// Clear the cache of one or more indices. 
+// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. package clearcache import ( @@ -76,10 +82,16 @@ func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { } } -// Clears the caches of one or more indices. -// For data streams, the API clears the caches of the stream’s backing indices. +// Clear the cache. +// Clear the cache of one or more indices. +// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache func New(tp elastictransport.Interface) *ClearCache { r := &ClearCache{ transport: tp, diff --git a/typedapi/indices/clearcache/response.go b/typedapi/indices/clearcache/response.go index a301cac9f8..1f5c4b4a9a 100644 --- a/typedapi/indices/clearcache/response.go +++ b/typedapi/indices/clearcache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 type Response struct { Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } diff --git a/typedapi/indices/clone/clone.go b/typedapi/indices/clone/clone.go index 13e15e676c..9a3ad02cbb 100644 --- a/typedapi/indices/clone/clone.go +++ b/typedapi/indices/clone/clone.go @@ -16,9 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clones an existing index. +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. +// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. 
+// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. +// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. +// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. 
+// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. +// +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. package clone import ( @@ -86,9 +156,79 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { } } -// Clones an existing index. +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. +// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. +// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. 
+// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. +// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. +// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone func New(tp elastictransport.Interface) *Clone { r := &Clone{ transport: tp, @@ -96,8 +236,6 @@ func New(tp elastictransport.Interface) *Clone { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,20 +546,62 @@ func (r *Clone) Pretty(pretty bool) *Clone { return r } -// Aliases Aliases for the resulting index. +// Aliases for the resulting index. // API name: aliases func (r *Clone) Aliases(aliases map[string]types.Alias) *Clone { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Clone) AddAlias(key string, value types.AliasVariant) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index. 
// API name: settings func (r *Clone) Settings(settings map[string]json.RawMessage) *Clone { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Clone) AddSetting(key string, value json.RawMessage) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/clone/request.go b/typedapi/indices/clone/request.go index 033d5c36e7..6da81aea3a 100644 --- a/typedapi/indices/clone/request.go +++ b/typedapi/indices/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clone @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/clone/IndicesCloneRequest.ts#L27-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/clone/IndicesCloneRequest.ts#L27-L127 type Request struct { // Aliases Aliases for the resulting index. diff --git a/typedapi/indices/clone/response.go b/typedapi/indices/clone/response.go index 1d29bb2b66..a458ba5f04 100644 --- a/typedapi/indices/clone/response.go +++ b/typedapi/indices/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/close/close.go b/typedapi/indices/close/close.go index d1abf77822..e6812e16f5 100644 --- a/typedapi/indices/close/close.go +++ b/typedapi/indices/close/close.go @@ -16,9 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Closes an index. +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. 
+// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. package close import ( @@ -77,9 +105,37 @@ func NewCloseFunc(tp elastictransport.Interface) NewClose { } } -// Closes an index. +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. 
+// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-close.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close func New(tp elastictransport.Interface) *Close { r := &Close{ transport: tp, diff --git a/typedapi/indices/close/response.go b/typedapi/indices/close/response.go index 68b517460e..9d873e2720 100644 --- a/typedapi/indices/close/response.go +++ b/typedapi/indices/close/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package close @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package close // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/close/CloseIndexResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/close/CloseIndexResponse.ts#L24-L30 type Response struct { Acknowledged bool `json:"acknowledged"` Indices map[string]types.CloseIndexResult `json:"indices"` diff --git a/typedapi/indices/create/create.go b/typedapi/indices/create/create.go index a162af246e..6779cc87f7 100644 --- a/typedapi/indices/create/create.go +++ b/typedapi/indices/create/create.go @@ -16,10 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create an index. -// Creates a new index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** +// +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. 
+// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, while `shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. +// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. +// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. package create import ( @@ -83,9 +116,42 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } // Create an index. -// Creates a new index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. 
+// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, while `shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. +// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. +// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create func New(tp elastictransport.Interface) *Create { r := &Create{ transport: tp, @@ -93,8 +159,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -388,32 +452,61 @@ func (r *Create) Pretty(pretty bool) *Create { return r } -// Aliases Aliases for the index. +// Aliases for the index. 
// API name: aliases func (r *Create) Aliases(aliases map[string]types.Alias) *Create { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Create) AddAlias(key string, value types.AliasVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Mappings Mapping for fields in the index. If specified, this mapping can include: +// Mapping for fields in the index. If specified, this mapping can include: // - Field names // - Field data types // - Mapping parameters // API name: mappings -func (r *Create) Mappings(mappings *types.TypeMapping) *Create { +func (r *Create) Mappings(mappings types.TypeMappingVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Settings Configuration options for the index. +// Configuration options for the index. // API name: settings -func (r *Create) Settings(settings *types.IndexSettings) *Create { +func (r *Create) Settings(settings types.IndexSettingsVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } diff --git a/typedapi/indices/create/request.go b/typedapi/indices/create/request.go index e3ce304973..a5bf19808b 100644 --- a/typedapi/indices/create/request.go +++ b/typedapi/indices/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/create/IndicesCreateRequest.ts#L28-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create/IndicesCreateRequest.ts#L28-L108 type Request struct { // Aliases Aliases for the index. diff --git a/typedapi/indices/create/response.go b/typedapi/indices/create/response.go index ace847a669..d0f621a641 100644 --- a/typedapi/indices/create/response.go +++ b/typedapi/indices/create/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/create/IndicesCreateResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create/IndicesCreateResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/createdatastream/create_data_stream.go b/typedapi/indices/createdatastream/create_data_stream.go index 8472bd73a2..dc7e616314 100644 --- a/typedapi/indices/createdatastream/create_data_stream.go +++ b/typedapi/indices/createdatastream/create_data_stream.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a data stream. -// Creates a data stream. +// // You must have a matching index template with data stream enabled. package createdatastream @@ -79,10 +79,10 @@ func NewCreateDataStreamFunc(tp elastictransport.Interface) NewCreateDataStream } // Create a data stream. -// Creates a data stream. +// // You must have a matching index template with data stream enabled. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream func New(tp elastictransport.Interface) *CreateDataStream { r := &CreateDataStream{ transport: tp, diff --git a/typedapi/indices/createdatastream/response.go b/typedapi/indices/createdatastream/response.go index d8205b033e..44ab9086c5 100644 --- a/typedapi/indices/createdatastream/response.go +++ b/typedapi/indices/createdatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createdatastream // Response holds the response body struct for the package createdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/createfrom/create_from.go b/typedapi/indices/createfrom/create_from.go new file mode 100644 index 0000000000..f28090532a --- /dev/null +++ b/typedapi/indices/createfrom/create_from.go @@ -0,0 +1,421 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create an index from a source index. +// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. +package createfrom + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + sourceMask = iota + 1 + + destMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateFrom struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + source string + dest string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateFrom type alias for index. 
+type NewCreateFrom func(source, dest string) *CreateFrom + +// NewCreateFromFunc returns a new instance of CreateFrom with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFromFunc(tp elastictransport.Interface) NewCreateFrom { + return func(source, dest string) *CreateFrom { + n := New(tp) + + n._source(source) + + n._dest(dest) + + return n + } +} + +// Create an index from a source index. +// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *CreateFrom { + r := &CreateFrom{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CreateFrom) Raw(raw io.Reader) *CreateFrom { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateFrom) Request(req *Request) *CreateFrom { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *CreateFrom) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateFrom: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == sourceMask|destMask: + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.source) + } + path.WriteString(r.source) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "dest", r.dest) + } + path.WriteString(r.dest) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CreateFrom) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateFrom query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createfrom.Response +func (r CreateFrom) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err 
= json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateFrom headers map. +func (r *CreateFrom) Header(key, value string) *CreateFrom { + r.headers.Set(key, value) + + return r +} + +// Source The source index or data stream name +// API Name: source +func (r *CreateFrom) _source(source string) *CreateFrom { + r.paramSet |= sourceMask + r.source = source + + return r +} + +// Dest The destination index or data stream name +// API Name: dest +func (r *CreateFrom) _dest(dest string) *CreateFrom { + r.paramSet |= destMask + r.dest = dest + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateFrom) ErrorTrace(errortrace bool) *CreateFrom { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *CreateFrom) FilterPath(filterpaths ...string) *CreateFrom { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateFrom) Human(human bool) *CreateFrom { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging. +// API name: pretty +func (r *CreateFrom) Pretty(pretty bool) *CreateFrom { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Mappings overrides to be applied to the destination index (optional) +// API name: mappings_override +func (r *CreateFrom) MappingsOverride(mappingsoverride types.TypeMappingVariant) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MappingsOverride = mappingsoverride.TypeMappingCaster() + + return r +} + +// If index blocks should be removed when creating destination index (optional) +// API name: remove_index_blocks +func (r *CreateFrom) RemoveIndexBlocks(removeindexblocks bool) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoveIndexBlocks = &removeindexblocks + + return r +} + +// Settings overrides to be applied to the destination index (optional) +// API name: settings_override +func (r *CreateFrom) SettingsOverride(settingsoverride types.IndexSettingsVariant) *CreateFrom { + // Initialize the
request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SettingsOverride = settingsoverride.IndexSettingsCaster() + + return r +} diff --git a/typedapi/indices/createfrom/request.go b/typedapi/indices/createfrom/request.go new file mode 100644 index 0000000000..7210f97384 --- /dev/null +++ b/typedapi/indices/createfrom/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package createfrom + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create_from/MigrateCreateFromRequest.ts#L25-L44 +type Request = types.CreateFrom + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewCreateFrom() + + return r +} diff --git a/typedapi/indices/createfrom/response.go b/typedapi/indices/createfrom/response.go new file mode 100644 index 0000000000..2496f4f1a2 --- /dev/null +++ b/typedapi/indices/createfrom/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package createfrom + +// Response holds the response body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create_from/MigrateCreateFromResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/datastreamsstats/data_streams_stats.go b/typedapi/indices/datastreamsstats/data_streams_stats.go index 01a6e60122..69257e346a 100644 --- a/typedapi/indices/datastreamsstats/data_streams_stats.go +++ b/typedapi/indices/datastreamsstats/data_streams_stats.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data stream stats. -// Retrieves statistics for one or more data streams. +// +// Get statistics for one or more data streams. package datastreamsstats import ( @@ -77,9 +78,10 @@ func NewDataStreamsStatsFunc(tp elastictransport.Interface) NewDataStreamsStats } // Get data stream stats. -// Retrieves statistics for one or more data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// Get statistics for one or more data streams. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 func New(tp elastictransport.Interface) *DataStreamsStats { r := &DataStreamsStats{ transport: tp, diff --git a/typedapi/indices/datastreamsstats/response.go b/typedapi/indices/datastreamsstats/response.go index 1b90aa1df1..21e528a93a 100644 --- a/typedapi/indices/datastreamsstats/response.go +++ b/typedapi/indices/datastreamsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package datastreamsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package datastreamsstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 type Response struct { // BackingIndices Total number of backing indices for the selected data streams. diff --git a/typedapi/indices/delete/delete.go b/typedapi/indices/delete/delete.go index fefffa5d13..c440008ef6 100644 --- a/typedapi/indices/delete/delete.go +++ b/typedapi/indices/delete/delete.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete indices. -// Deletes one or more indices. 
+// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. +// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. package delete import ( @@ -79,9 +86,16 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } // Delete indices. -// Deletes one or more indices. +// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. +// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, diff --git a/typedapi/indices/delete/response.go b/typedapi/indices/delete/response.go index b7b58e9cf2..381c774f5a 100644 --- a/typedapi/indices/delete/response.go +++ b/typedapi/indices/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletealias/delete_alias.go b/typedapi/indices/deletealias/delete_alias.go index 791ad45232..97c771f99a 100644 --- a/typedapi/indices/deletealias/delete_alias.go +++ b/typedapi/indices/deletealias/delete_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an alias. // Removes a data stream or index from an alias. @@ -85,7 +85,7 @@ func NewDeleteAliasFunc(tp elastictransport.Interface) NewDeleteAlias { // Delete an alias. // Removes a data stream or index from an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias func New(tp elastictransport.Interface) *DeleteAlias { r := &DeleteAlias{ transport: tp, diff --git a/typedapi/indices/deletealias/response.go b/typedapi/indices/deletealias/response.go index 491ab414db..1bca395af1 100644 --- a/typedapi/indices/deletealias/response.go +++ b/typedapi/indices/deletealias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletealias // Response holds the response body struct for the package deletealias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go b/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go index 0c43774f0b..eeffc04b15 100644 --- a/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go +++ b/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete data stream lifecycles. // Removes the data stream lifecycle from a data stream, rendering it not @@ -83,7 +83,7 @@ func NewDeleteDataLifecycleFunc(tp elastictransport.Interface) NewDeleteDataLife // Removes the data stream lifecycle from a data stream, rendering it not // managed by the data stream lifecycle. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-delete-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle func New(tp elastictransport.Interface) *DeleteDataLifecycle { r := &DeleteDataLifecycle{ transport: tp, diff --git a/typedapi/indices/deletedatalifecycle/response.go b/typedapi/indices/deletedatalifecycle/response.go index af6f5656bb..0d88cb1ed8 100644 --- a/typedapi/indices/deletedatalifecycle/response.go +++ b/typedapi/indices/deletedatalifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletedatalifecycle // Response holds the response body struct for the package deletedatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletedatastream/delete_data_stream.go b/typedapi/indices/deletedatastream/delete_data_stream.go index ec51914783..c7f71b54ea 100644 --- a/typedapi/indices/deletedatastream/delete_data_stream.go +++ b/typedapi/indices/deletedatastream/delete_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete data streams. // Deletes one or more data streams and their backing indices. @@ -81,7 +81,7 @@ func NewDeleteDataStreamFunc(tp elastictransport.Interface) NewDeleteDataStream // Delete data streams. // Deletes one or more data streams and their backing indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream func New(tp elastictransport.Interface) *DeleteDataStream { r := &DeleteDataStream{ transport: tp, diff --git a/typedapi/indices/deletedatastream/response.go b/typedapi/indices/deletedatastream/response.go index a172dd1208..f15a0de0c0 100644 --- a/typedapi/indices/deletedatastream/response.go +++ b/typedapi/indices/deletedatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletedatastream // Response holds the response body struct for the package deletedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deleteindextemplate/delete_index_template.go b/typedapi/indices/deleteindextemplate/delete_index_template.go index 9bb6d1fc75..734d7d7b23 100644 --- a/typedapi/indices/deleteindextemplate/delete_index_template.go +++ b/typedapi/indices/deleteindextemplate/delete_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an index template. // The provided may contain multiple template names separated @@ -88,7 +88,7 @@ func NewDeleteIndexTemplateFunc(tp elastictransport.Interface) NewDeleteIndexTem // should match completely with // existing templates. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template func New(tp elastictransport.Interface) *DeleteIndexTemplate { r := &DeleteIndexTemplate{ transport: tp, diff --git a/typedapi/indices/deleteindextemplate/response.go b/typedapi/indices/deleteindextemplate/response.go index 5d11113c01..b92c0c27ec 100644 --- a/typedapi/indices/deleteindextemplate/response.go +++ b/typedapi/indices/deleteindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteindextemplate // Response holds the response body struct for the package deleteindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/deletetemplate/delete_template.go b/typedapi/indices/deletetemplate/delete_template.go index cd5713beac..69183648da 100644 --- a/typedapi/indices/deletetemplate/delete_template.go +++ b/typedapi/indices/deletetemplate/delete_template.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a legacy index template. +// Delete a legacy index template. package deletetemplate import ( @@ -76,9 +76,9 @@ func NewDeleteTemplateFunc(tp elastictransport.Interface) NewDeleteTemplate { } } -// Deletes a legacy index template. +// Delete a legacy index template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template-v1.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template func New(tp elastictransport.Interface) *DeleteTemplate { r := &DeleteTemplate{ transport: tp, diff --git a/typedapi/indices/deletetemplate/response.go b/typedapi/indices/deletetemplate/response.go index 8ebb4070ff..93ff61ebec 100644 --- a/typedapi/indices/deletetemplate/response.go +++ b/typedapi/indices/deletetemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletetemplate // Response holds the response body struct for the package deletetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/diskusage/disk_usage.go b/typedapi/indices/diskusage/disk_usage.go index 675394b101..38125c894d 100644 --- a/typedapi/indices/diskusage/disk_usage.go +++ b/typedapi/indices/diskusage/disk_usage.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Analyzes the disk usage of each field of an index or data stream. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Analyze the index disk usage. +// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. 
+// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. package diskusage import ( @@ -77,9 +91,23 @@ func NewDiskUsageFunc(tp elastictransport.Interface) NewDiskUsage { } } -// Analyzes the disk usage of each field of an index or data stream. +// Analyze the index disk usage. +// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. +// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage func New(tp elastictransport.Interface) *DiskUsage { r := &DiskUsage{ transport: tp, diff --git a/typedapi/indices/diskusage/response.go b/typedapi/indices/diskusage/response.go index 0161a4bf91..677f8d594c 100644 --- a/typedapi/indices/diskusage/response.go +++ b/typedapi/indices/diskusage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package diskusage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package diskusage // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/downsample/downsample.go b/typedapi/indices/downsample/downsample.go index 1cd1d0bd4e..f6ea1bf0e0 100644 --- a/typedapi/indices/downsample/downsample.go +++ b/typedapi/indices/downsample/downsample.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Aggregates a time series (TSDS) index and stores pre-computed statistical +// Downsample an index. +// Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. 
+// The source index must be read only (`index.blocks.write: true`). package downsample import ( @@ -88,11 +97,20 @@ func NewDownsampleFunc(tp elastictransport.Interface) NewDownsample { } } -// Aggregates a time series (TSDS) index and stores pre-computed statistical +// Downsample an index. +// Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. +// The source index must be read only (`index.blocks.write: true`). // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-downsample-data-stream.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample func New(tp elastictransport.Interface) *Downsample { r := &Downsample{ transport: tp, @@ -100,8 +118,6 @@ func New(tp elastictransport.Interface) *Downsample { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -381,9 +397,14 @@ func (r *Downsample) Pretty(pretty bool) *Downsample { return r } -// FixedInterval The interval at which to aggregate the original time series index. +// The interval at which to aggregate the original time series index. 
// API name: fixed_interval func (r *Downsample) FixedInterval(durationlarge string) *Downsample { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FixedInterval = durationlarge return r diff --git a/typedapi/indices/downsample/request.go b/typedapi/indices/downsample/request.go index 6935235647..bab55b6fd8 100644 --- a/typedapi/indices/downsample/request.go +++ b/typedapi/indices/downsample/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package downsample @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package downsample // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/downsample/Request.ts#L24-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/downsample/Request.ts#L24-L58 type Request = types.DownsampleConfig // NewRequest returns a Request diff --git a/typedapi/indices/downsample/response.go b/typedapi/indices/downsample/response.go index a645b6dfe7..793e59b2da 100644 --- a/typedapi/indices/downsample/response.go +++ b/typedapi/indices/downsample/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package downsample @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package downsample // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/downsample/Response.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/downsample/Response.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/exists/exists.go b/typedapi/indices/exists/exists.go index 6f02221019..d2437c5bb0 100644 --- a/typedapi/indices/exists/exists.go +++ b/typedapi/indices/exists/exists.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check indices. -// Checks if one or more indices, index aliases, or data streams exist. +// Check if one or more indices, index aliases, or data streams exist. package exists import ( @@ -77,9 +77,9 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { } // Check indices. -// Checks if one or more indices, index aliases, or data streams exist. +// Check if one or more indices, index aliases, or data streams exist. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists func New(tp elastictransport.Interface) *Exists { r := &Exists{ transport: tp, diff --git a/typedapi/indices/existsalias/exists_alias.go b/typedapi/indices/existsalias/exists_alias.go index f4be5b74fc..a088361e23 100644 --- a/typedapi/indices/existsalias/exists_alias.go +++ b/typedapi/indices/existsalias/exists_alias.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check aliases. -// Checks if one or more data stream or index aliases exist. +// +// Check if one or more data stream or index aliases exist. package existsalias import ( @@ -80,9 +81,10 @@ func NewExistsAliasFunc(tp elastictransport.Interface) NewExistsAlias { } // Check aliases. -// Checks if one or more data stream or index aliases exist. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// Check if one or more data stream or index aliases exist. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias func New(tp elastictransport.Interface) *ExistsAlias { r := &ExistsAlias{ transport: tp, @@ -316,6 +318,16 @@ func (r *ExistsAlias) IgnoreUnavailable(ignoreunavailable bool) *ExistsAlias { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout +func (r *ExistsAlias) MasterTimeout(duration string) *ExistsAlias { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/existsindextemplate/exists_index_template.go b/typedapi/indices/existsindextemplate/exists_index_template.go index 7f8846eb0a..2dbf31c302 100644 --- a/typedapi/indices/existsindextemplate/exists_index_template.go +++ b/typedapi/indices/existsindextemplate/exists_index_template.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about whether a particular index template exists. +// Check index templates. +// +// Check whether index templates exist. package existsindextemplate import ( @@ -74,9 +76,11 @@ func NewExistsIndexTemplateFunc(tp elastictransport.Interface) NewExistsIndexTem } } -// Returns information about whether a particular index template exists. +// Check index templates. +// +// Check whether index templates exist. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template func New(tp elastictransport.Interface) *ExistsIndexTemplate { r := &ExistsIndexTemplate{ transport: tp, diff --git a/typedapi/indices/existstemplate/exists_template.go b/typedapi/indices/existstemplate/exists_template.go index 399616f8c3..e58ebd15b9 100644 --- a/typedapi/indices/existstemplate/exists_template.go +++ b/typedapi/indices/existstemplate/exists_template.go @@ -16,10 +16,16 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check existence of index templates. -// Returns information about whether a particular index template exists. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. package existstemplate import ( @@ -76,9 +82,15 @@ func NewExistsTemplateFunc(tp elastictransport.Interface) NewExistsTemplate { } // Check existence of index templates. -// Returns information about whether a particular index template exists. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-template-exists-v1.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template func New(tp elastictransport.Interface) *ExistsTemplate { r := &ExistsTemplate{ transport: tp, @@ -239,7 +251,8 @@ func (r *ExistsTemplate) Header(key, value string) *ExistsTemplate { return r } -// Name The comma separated names of the index templates +// Name A comma-separated list of index template names used to limit the request. +// Wildcard (`*`) expressions are supported. 
// API Name: name func (r *ExistsTemplate) _name(name string) *ExistsTemplate { r.paramSet |= nameMask @@ -248,7 +261,7 @@ func (r *ExistsTemplate) _name(name string) *ExistsTemplate { return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings Indicates whether to use a flat format for the response. // API name: flat_settings func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) @@ -256,8 +269,7 @@ func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local Indicates whether to get information from the local node only. // API name: local func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { r.values.Set("local", strconv.FormatBool(local)) @@ -265,7 +277,10 @@ func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *ExistsTemplate) MasterTimeout(duration string) *ExistsTemplate { r.values.Set("master_timeout", duration) diff --git a/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go b/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go index 302771c35d..1515911320 100644 --- a/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go +++ b/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get the status for a data stream lifecycle. -// Retrieves information about an index or data stream’s current data stream -// lifecycle status, such as time since index creation, time since rollover, the -// lifecycle configuration managing the index, or any errors encountered during -// lifecycle execution. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. package explaindatalifecycle import ( @@ -81,12 +81,12 @@ func NewExplainDataLifecycleFunc(tp elastictransport.Interface) NewExplainDataLi } // Get the status for a data stream lifecycle. -// Retrieves information about an index or data stream’s current data stream -// lifecycle status, such as time since index creation, time since rollover, the -// lifecycle configuration managing the index, or any errors encountered during -// lifecycle execution. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle func New(tp elastictransport.Interface) *ExplainDataLifecycle { r := &ExplainDataLifecycle{ transport: tp, diff --git a/typedapi/indices/explaindatalifecycle/response.go b/typedapi/indices/explaindatalifecycle/response.go index ee89ac03d2..6e8697fbfd 100644 --- a/typedapi/indices/explaindatalifecycle/response.go +++ b/typedapi/indices/explaindatalifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explaindatalifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explaindatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 type Response struct { Indices map[string]types.DataStreamLifecycleExplain `json:"indices"` } diff --git a/typedapi/indices/fieldusagestats/field_usage_stats.go b/typedapi/indices/fieldusagestats/field_usage_stats.go index faefce29fa..34a88f67b3 100644 --- a/typedapi/indices/fieldusagestats/field_usage_stats.go +++ b/typedapi/indices/fieldusagestats/field_usage_stats.go @@ -16,9 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Returns field usage information for each shard and field of an index. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. package fieldusagestats import ( @@ -77,9 +87,19 @@ func NewFieldUsageStatsFunc(tp elastictransport.Interface) NewFieldUsageStats { } } -// Returns field usage information for each shard and field of an index. +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats func New(tp elastictransport.Interface) *FieldUsageStats { r := &FieldUsageStats{ transport: tp, @@ -343,26 +363,6 @@ func (r *FieldUsageStats) Fields(fields ...string) *FieldUsageStats { return r } -// MasterTimeout Period to wait for a connection to the master node. -// If no response is received before the timeout expires, the request fails and -// returns an error. -// API name: master_timeout -func (r *FieldUsageStats) MasterTimeout(duration string) *FieldUsageStats { - r.values.Set("master_timeout", duration) - - return r -} - -// Timeout Period to wait for a response. -// If no response is received before the timeout expires, the request fails and -// returns an error. -// API name: timeout -func (r *FieldUsageStats) Timeout(duration string) *FieldUsageStats { - r.values.Set("timeout", duration) - - return r -} - // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. // Set to all or any positive integer up to the total number of shards in the diff --git a/typedapi/indices/fieldusagestats/response.go b/typedapi/indices/fieldusagestats/response.go index 4d69ba4e67..c1864b4c17 100644 --- a/typedapi/indices/fieldusagestats/response.go +++ b/typedapi/indices/fieldusagestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package fieldusagestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fieldusagestats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 type Response struct { FieldsUsageBody map[string]types.UsageStatsIndex `json:"-"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/flush/flush.go b/typedapi/indices/flush/flush.go index e39c025f86..8425bace48 100644 --- a/typedapi/indices/flush/flush.go +++ b/typedapi/indices/flush/flush.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Flushes one or more data streams or indices. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. 
+// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. package flush import ( @@ -75,9 +98,32 @@ func NewFlushFunc(tp elastictransport.Interface) NewFlush { } } -// Flushes one or more data streams or indices. +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. 
+// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush func New(tp elastictransport.Interface) *Flush { r := &Flush{ transport: tp, diff --git a/typedapi/indices/flush/response.go b/typedapi/indices/flush/response.go index 056aaf9ae1..be0f1dc84a 100644 --- a/typedapi/indices/flush/response.go +++ b/typedapi/indices/flush/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package flush @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package flush // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 type Response struct { Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } diff --git a/typedapi/indices/forcemerge/forcemerge.go b/typedapi/indices/forcemerge/forcemerge.go index f35134a96f..9581b20b28 100644 --- a/typedapi/indices/forcemerge/forcemerge.go +++ b/typedapi/indices/forcemerge/forcemerge.go @@ -16,9 +16,89 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Performs the force merge operation on one or more indices. +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). 
+// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. +// If you regularly force merge an index receiving writes, this can also make +// snapshots more expensive, since the new documents can't be backed up +// incrementally. +// +// **Blocks during a force merge** +// +// Calls to this API block until the merge is complete (unless request contains +// `wait_for_completion=false`). +// If the client connection is lost before completion then the force merge +// process will continue in the background. +// Any new requests to force merge the same indices will also block until the +// ongoing force merge is complete. +// +// **Running force merge asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to get the status of the task. +// However, you can not cancel this task as the force merge task is not +// cancelable. +// Elasticsearch creates a record of this task as a document at +// `_tasks/<task_id>`. +// When you are done with a task, you should delete the task document so +// Elasticsearch can reclaim the space. +// +// **Force merging multiple indices** +// +// You can force merge multiple indices with a single request by targeting: +// +// * One or more data streams that contain multiple backing indices +// * Multiple indices +// * One or more aliases +// * All data streams and indices in a cluster +// +// Each targeted shard is force-merged separately using the force_merge +// threadpool. 
+// By default each node only has a single `force_merge` thread which means that +// the shards on that node are force-merged one at a time. +// If you expand the `force_merge` threadpool on a node then it will force merge +// its shards in parallel +// +// Force merge makes the storage for the shard being merged temporarily +// increase, as it may require free space up to triple its size in case +// `max_num_segments parameter` is set to `1`, to rewrite all segments into a +// new one. +// +// **Data streams and time-based indices** +// +// Force-merging is useful for managing a data stream's older backing indices +// and other time-based indices, particularly after a rollover. +// In these cases, each index only receives indexing traffic for a certain +// period of time. +// Once an index receive no more writes, its shards can be force-merged to a +// single segment. +// This can be a good idea because single-segment shards can sometimes use +// simpler and more efficient data structures to perform searches. +// For example: +// +// ``` +// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +// ``` package forcemerge import ( @@ -75,9 +155,89 @@ func NewForcemergeFunc(tp elastictransport.Interface) NewForcemerge { } } -// Performs the force merge operation on one or more indices. +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). 
+// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. +// If you regularly force merge an index receiving writes, this can also make +// snapshots more expensive, since the new documents can't be backed up +// incrementally. +// +// **Blocks during a force merge** +// +// Calls to this API block until the merge is complete (unless request contains +// `wait_for_completion=false`). +// If the client connection is lost before completion then the force merge +// process will continue in the background. +// Any new requests to force merge the same indices will also block until the +// ongoing force merge is complete. +// +// **Running force merge asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to get the status of the task. +// However, you can not cancel this task as the force merge task is not +// cancelable. +// Elasticsearch creates a record of this task as a document at +// `_tasks/<task_id>`. +// When you are done with a task, you should delete the task document so +// Elasticsearch can reclaim the space. +// +// **Force merging multiple indices** +// +// You can force merge multiple indices with a single request by targeting: +// +// * One or more data streams that contain multiple backing indices +// * Multiple indices +// * One or more aliases +// * All data streams and indices in a cluster +// +// Each targeted shard is force-merged separately using the force_merge +// threadpool. 
+// By default each node only has a single `force_merge` thread which means that +// the shards on that node are force-merged one at a time. +// If you expand the `force_merge` threadpool on a node then it will force merge +// its shards in parallel +// +// Force merge makes the storage for the shard being merged temporarily +// increase, as it may require free space up to triple its size in case +// `max_num_segments parameter` is set to `1`, to rewrite all segments into a +// new one. +// +// **Data streams and time-based indices** +// +// Force-merging is useful for managing a data stream's older backing indices +// and other time-based indices, particularly after a rollover. +// In these cases, each index only receives indexing traffic for a certain +// period of time. +// Once an index receive no more writes, its shards can be force-merged to a +// single segment. +// This can be a good idea because single-segment shards can sometimes use +// simpler and more efficient data structures to perform searches. +// For example: +// +// ``` +// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +// ``` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge func New(tp elastictransport.Interface) *Forcemerge { r := &Forcemerge{ transport: tp, diff --git a/typedapi/indices/forcemerge/response.go b/typedapi/indices/forcemerge/response.go index 3f84ef75e3..784ece6872 100644 --- a/typedapi/indices/forcemerge/response.go +++ b/typedapi/indices/forcemerge/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package forcemerge @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package forcemerge // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 type Response struct { Shards_ *types.ShardStatistics `json:"_shards,omitempty"` // Task task contains a task id returned when wait_for_completion=false, diff --git a/typedapi/indices/get/get.go b/typedapi/indices/get/get.go index 971e6a123e..1555c27ca3 100644 --- a/typedapi/indices/get/get.go +++ b/typedapi/indices/get/get.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get index information. -// Returns information about one or more indices. For data streams, the API -// returns information about the +// Get information about one or more indices. For data streams, the API returns +// information about the // stream’s backing indices. package get @@ -82,11 +82,11 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get index information. -// Returns information about one or more indices. For data streams, the API -// returns information about the +// Get information about one or more indices. For data streams, the API returns +// information about the // stream’s backing indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/indices/get/response.go b/typedapi/indices/get/response.go index 85e7eff8a1..641876fb34 100644 --- a/typedapi/indices/get/response.go +++ b/typedapi/indices/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get/IndicesGetResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get/IndicesGetResponse.ts#L24-L27 type Response map[string]types.IndexState diff --git a/typedapi/indices/getalias/get_alias.go b/typedapi/indices/getalias/get_alias.go index 93d0bd46e3..2d3431d148 100644 --- a/typedapi/indices/getalias/get_alias.go +++ b/typedapi/indices/getalias/get_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get aliases. // Retrieves information for one or more data stream or index aliases. @@ -83,7 +83,7 @@ func NewGetAliasFunc(tp elastictransport.Interface) NewGetAlias { // Get aliases. 
// Retrieves information for one or more data stream or index aliases. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias func New(tp elastictransport.Interface) *GetAlias { r := &GetAlias{ transport: tp, @@ -420,6 +420,16 @@ func (r *GetAlias) IgnoreUnavailable(ignoreunavailable bool) *GetAlias { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAlias) MasterTimeout(duration string) *GetAlias { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/getalias/response.go b/typedapi/indices/getalias/response.go index 5d2a8d0a22..e2f9a6c54d 100644 --- a/typedapi/indices/getalias/response.go +++ b/typedapi/indices/getalias/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getalias @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getalias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L35 type Response map[string]types.IndexAliases diff --git a/typedapi/indices/getdatalifecycle/get_data_lifecycle.go b/typedapi/indices/getdatalifecycle/get_data_lifecycle.go index 743fb4c6b1..d136e912e0 100644 --- a/typedapi/indices/getdatalifecycle/get_data_lifecycle.go +++ b/typedapi/indices/getdatalifecycle/get_data_lifecycle.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data stream lifecycles. -// Retrieves the data stream lifecycle configuration of one or more data -// streams. +// +// Get the data stream lifecycle configuration of one or more data streams. package getdatalifecycle import ( @@ -80,10 +80,10 @@ func NewGetDataLifecycleFunc(tp elastictransport.Interface) NewGetDataLifecycle } // Get data stream lifecycles. -// Retrieves the data stream lifecycle configuration of one or more data -// streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle.html +// Get the data stream lifecycle configuration of one or more data streams. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle func New(tp elastictransport.Interface) *GetDataLifecycle { r := &GetDataLifecycle{ transport: tp, diff --git a/typedapi/indices/getdatalifecycle/response.go b/typedapi/indices/getdatalifecycle/response.go index f92e0faede..1c353017b2 100644 --- a/typedapi/indices/getdatalifecycle/response.go +++ b/typedapi/indices/getdatalifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdatalifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 type Response struct { DataStreams []types.DataStreamWithLifecycle `json:"data_streams"` } diff --git a/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go b/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go new file mode 100644 index 0000000000..029cdd1129 --- /dev/null +++ b/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. +package getdatalifecyclestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataLifecycleStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataLifecycleStats type alias for index. +type NewGetDataLifecycleStats func() *GetDataLifecycleStats + +// NewGetDataLifecycleStatsFunc returns a new instance of GetDataLifecycleStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetDataLifecycleStatsFunc(tp elastictransport.Interface) NewGetDataLifecycleStats { + return func() *GetDataLifecycleStats { + n := New(tp) + + return n + } +} + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats +func New(tp elastictransport.Interface) *GetDataLifecycleStats { + r := &GetDataLifecycleStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataLifecycleStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetDataLifecycleStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle_stats") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_lifecycle_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatalifecyclestats.Response +func (r GetDataLifecycleStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetDataLifecycleStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataLifecycleStats headers map. +func (r *GetDataLifecycleStats) Header(key, value string) *GetDataLifecycleStats { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataLifecycleStats) ErrorTrace(errortrace bool) *GetDataLifecycleStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataLifecycleStats) FilterPath(filterpaths ...string) *GetDataLifecycleStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataLifecycleStats) Human(human bool) *GetDataLifecycleStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *GetDataLifecycleStats) Pretty(pretty bool) *GetDataLifecycleStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/indices/getdatalifecyclestats/response.go b/typedapi/indices/getdatalifecyclestats/response.go new file mode 100644 index 0000000000..330ef66ae4 --- /dev/null +++ b/typedapi/indices/getdatalifecyclestats/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package getdatalifecyclestats + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getdatalifecyclestats +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L24-L44 +type Response struct { + + // DataStreamCount The count of data streams currently being managed by the data stream + // lifecycle. + DataStreamCount int `json:"data_stream_count"` + // DataStreams Information about the data streams that are managed by the data stream + // lifecycle. + DataStreams []types.DataStreamStats `json:"data_streams"` + // LastRunDurationInMillis The duration of the last data stream lifecycle execution. + LastRunDurationInMillis *int64 `json:"last_run_duration_in_millis,omitempty"` + // TimeBetweenStartsInMillis The time that passed between the start of the last two data stream lifecycle + // executions. + // This value should amount approximately to + // `data_streams.lifecycle.poll_interval`. + TimeBetweenStartsInMillis *int64 `json:"time_between_starts_in_millis,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/getdatastream/get_data_stream.go b/typedapi/indices/getdatastream/get_data_stream.go index 0e836c7486..70807b5263 100644 --- a/typedapi/indices/getdatastream/get_data_stream.go +++ b/typedapi/indices/getdatastream/get_data_stream.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data streams. -// Retrieves information about one or more data streams. +// +// Get information about one or more data streams. package getdatastream import ( @@ -77,9 +78,10 @@ func NewGetDataStreamFunc(tp elastictransport.Interface) NewGetDataStream { } // Get data streams. -// Retrieves information about one or more data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// Get information about one or more data streams. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream func New(tp elastictransport.Interface) *GetDataStream { r := &GetDataStream{ transport: tp, diff --git a/typedapi/indices/getdatastream/response.go b/typedapi/indices/getdatastream/response.go index d9c6f8f818..d6c5a8d54c 100644 --- a/typedapi/indices/getdatastream/response.go +++ b/typedapi/indices/getdatastream/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdatastream @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 type Response struct { DataStreams []types.DataStream `json:"data_streams"` } diff --git a/typedapi/indices/getfieldmapping/get_field_mapping.go b/typedapi/indices/getfieldmapping/get_field_mapping.go index 61e7e1b100..9ee1342179 100644 --- a/typedapi/indices/getfieldmapping/get_field_mapping.go +++ b/typedapi/indices/getfieldmapping/get_field_mapping.go @@ -16,12 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get mapping definitions. // Retrieves mapping definitions for one or more fields. // For data streams, the API retrieves field mappings for the stream’s backing // indices. +// +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. package getfieldmapping import ( @@ -88,7 +91,10 @@ func NewGetFieldMappingFunc(tp elastictransport.Interface) NewGetFieldMapping { // For data streams, the API retrieves field mappings for the stream’s backing // indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping func New(tp elastictransport.Interface) *GetFieldMapping { r := &GetFieldMapping{ transport: tp, @@ -321,6 +327,7 @@ func (r *GetFieldMapping) Header(key, value string) *GetFieldMapping { // Fields Comma-separated list or wildcard expression of fields used to limit returned // information. +// Supports wildcards (`*`). // API Name: fields func (r *GetFieldMapping) _fields(fields string) *GetFieldMapping { r.paramSet |= fieldsMask diff --git a/typedapi/indices/getfieldmapping/response.go b/typedapi/indices/getfieldmapping/response.go index 768b065cb2..fbe23694b1 100644 --- a/typedapi/indices/getfieldmapping/response.go +++ b/typedapi/indices/getfieldmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getfieldmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfieldmapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27 type Response map[string]types.TypeFieldMappings diff --git a/typedapi/indices/getindextemplate/get_index_template.go b/typedapi/indices/getindextemplate/get_index_template.go index 72b0e66f20..950b691301 100644 --- a/typedapi/indices/getindextemplate/get_index_template.go +++ b/typedapi/indices/getindextemplate/get_index_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get index templates. -// Returns information about one or more index templates. +// Get information about one or more index templates. package getindextemplate import ( @@ -76,9 +76,9 @@ func NewGetIndexTemplateFunc(tp elastictransport.Interface) NewGetIndexTemplate } // Get index templates. -// Returns information about one or more index templates. +// Get information about one or more index templates. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template func New(tp elastictransport.Interface) *GetIndexTemplate { r := &GetIndexTemplate{ transport: tp, diff --git a/typedapi/indices/getindextemplate/response.go b/typedapi/indices/getindextemplate/response.go index 34e2d54ca9..9fd7634212 100644 --- a/typedapi/indices/getindextemplate/response.go +++ b/typedapi/indices/getindextemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getindextemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 type Response struct { IndexTemplates []types.IndexTemplateItem `json:"index_templates"` } diff --git a/typedapi/indices/getmapping/get_mapping.go b/typedapi/indices/getmapping/get_mapping.go index 9fbab26a46..80faff97d2 100644 --- a/typedapi/indices/getmapping/get_mapping.go +++ b/typedapi/indices/getmapping/get_mapping.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get mapping definitions. -// Retrieves mapping definitions for one or more indices. // For data streams, the API retrieves mappings for the stream’s backing // indices. package getmapping @@ -79,11 +78,10 @@ func NewGetMappingFunc(tp elastictransport.Interface) NewGetMapping { } // Get mapping definitions. -// Retrieves mapping definitions for one or more indices. // For data streams, the API retrieves mappings for the stream’s backing // indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping func New(tp elastictransport.Interface) *GetMapping { r := &GetMapping{ transport: tp, diff --git a/typedapi/indices/getmapping/response.go b/typedapi/indices/getmapping/response.go index 9c8cb76b7a..d6379dcc63 100644 --- a/typedapi/indices/getmapping/response.go +++ b/typedapi/indices/getmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27 type Response map[string]types.IndexMappingRecord diff --git a/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go b/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go new file mode 100644 index 0000000000..a4aa8248c6 --- /dev/null +++ b/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. +package getmigratereindexstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetMigrateReindexStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetMigrateReindexStatus type alias for index. +type NewGetMigrateReindexStatus func(index string) *GetMigrateReindexStatus + +// NewGetMigrateReindexStatusFunc returns a new instance of GetMigrateReindexStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetMigrateReindexStatusFunc(tp elastictransport.Interface) NewGetMigrateReindexStatus { + return func(index string) *GetMigrateReindexStatus { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *GetMigrateReindexStatus { + r := &GetMigrateReindexStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetMigrateReindexStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the 
provided transport and returns an http.Response. +func (r GetMigrateReindexStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + if reader := instrument.RecordRequestBody(ctx, "indices.get_migrate_reindex_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmigratereindexstatus.Response +func (r GetMigrateReindexStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetMigrateReindexStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetMigrateReindexStatus headers map. +func (r *GetMigrateReindexStatus) Header(key, value string) *GetMigrateReindexStatus { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name. +// API Name: index +func (r *GetMigrateReindexStatus) _index(index string) *GetMigrateReindexStatus { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetMigrateReindexStatus) ErrorTrace(errortrace bool) *GetMigrateReindexStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetMigrateReindexStatus) FilterPath(filterpaths ...string) *GetMigrateReindexStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetMigrateReindexStatus) Human(human bool) *GetMigrateReindexStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *GetMigrateReindexStatus) Pretty(pretty bool) *GetMigrateReindexStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/indices/getmigratereindexstatus/response.go b/typedapi/indices/getmigratereindexstatus/response.go new file mode 100644 index 0000000000..699d75eb9c --- /dev/null +++ b/typedapi/indices/getmigratereindexstatus/response.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package getmigratereindexstatus + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getmigratereindexstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L23-L36 +type Response struct { + Complete bool `json:"complete"` + Errors []types.StatusError `json:"errors"` + Exception *string `json:"exception,omitempty"` + InProgress []types.StatusInProgress `json:"in_progress"` + Pending int `json:"pending"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeMillis int64 `json:"start_time_millis"` + Successes int `json:"successes"` + TotalIndicesInDataStream int `json:"total_indices_in_data_stream"` + TotalIndicesRequiringUpgrade int `json:"total_indices_requiring_upgrade"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "complete": + var tmp any + dec.Decode(&tmp) + switch v 
:= tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Complete", err) + } + s.Complete = value + case bool: + s.Complete = v + } + + case "errors": + if err := dec.Decode(&s.Errors); err != nil { + return fmt.Errorf("%s | %w", "Errors", err) + } + + case "exception": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Exception", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Exception = &o + + case "in_progress": + if err := dec.Decode(&s.InProgress); err != nil { + return fmt.Errorf("%s | %w", "InProgress", err) + } + + case "pending": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pending", err) + } + s.Pending = value + case float64: + f := int(v) + s.Pending = f + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + + case "successes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successes", err) + } + s.Successes = value + case float64: + f := int(v) + s.Successes = f + } + + case "total_indices_in_data_stream": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesInDataStream", err) + } + s.TotalIndicesInDataStream = value + case float64: + f := int(v) + s.TotalIndicesInDataStream = f + } + + case "total_indices_requiring_upgrade": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesRequiringUpgrade", err) + } + s.TotalIndicesRequiringUpgrade = value + case float64: + f := int(v) + s.TotalIndicesRequiringUpgrade = f + } + + } + } + return nil +} diff --git a/typedapi/indices/getsettings/get_settings.go b/typedapi/indices/getsettings/get_settings.go index b630bb88d9..181f2dfa9c 100644 --- a/typedapi/indices/getsettings/get_settings.go +++ b/typedapi/indices/getsettings/get_settings.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get index settings. -// Returns setting information for one or more indices. For data streams, -// returns setting information for the stream’s backing indices. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. package getsettings import ( @@ -81,10 +82,11 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } // Get index settings. -// Returns setting information for one or more indices. For data streams, -// returns setting information for the stream’s backing indices. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings func New(tp elastictransport.Interface) *GetSettings { r := &GetSettings{ transport: tp, diff --git a/typedapi/indices/getsettings/response.go b/typedapi/indices/getsettings/response.go index 54c8642af0..53036d20f4 100644 --- a/typedapi/indices/getsettings/response.go +++ b/typedapi/indices/getsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27 type Response map[string]types.IndexState diff --git a/typedapi/indices/gettemplate/get_template.go b/typedapi/indices/gettemplate/get_template.go index 6891c01a51..af1732a7f2 100644 --- a/typedapi/indices/gettemplate/get_template.go +++ b/typedapi/indices/gettemplate/get_template.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get index templates. -// Retrieves information about one or more index templates. 
+// Get information about one or more index templates. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. package gettemplate import ( @@ -76,9 +80,13 @@ func NewGetTemplateFunc(tp elastictransport.Interface) NewGetTemplate { } // Get index templates. -// Retrieves information about one or more index templates. +// Get information about one or more index templates. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template-v1.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template func New(tp elastictransport.Interface) *GetTemplate { r := &GetTemplate{ transport: tp, diff --git a/typedapi/indices/gettemplate/response.go b/typedapi/indices/gettemplate/response.go index dc26e1638d..d5a8fc8cd6 100644 --- a/typedapi/indices/gettemplate/response.go +++ b/typedapi/indices/gettemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L26 type Response map[string]types.TemplateMapping diff --git a/typedapi/indices/migratereindex/migrate_reindex.go b/typedapi/indices/migratereindex/migrate_reindex.go new file mode 100644 index 0000000000..b823fcf0a2 --- /dev/null +++ b/typedapi/indices/migratereindex/migrate_reindex.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +package migratereindex + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/modeenum" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMigrateReindex type alias for index. +type NewMigrateReindex func() *MigrateReindex + +// NewMigrateReindexFunc returns a new instance of MigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMigrateReindexFunc(tp elastictransport.Interface) NewMigrateReindex { + return func() *MigrateReindex { + n := New(tp) + + return n + } +} + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *MigrateReindex { + r := &MigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MigrateReindex) Raw(raw io.Reader) *MigrateReindex { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MigrateReindex) Request(req *Request) *MigrateReindex { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MigrateReindex: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), 
r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r MigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and 
returns a migratereindex.Response +func (r MigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the MigrateReindex headers map. +func (r *MigrateReindex) Header(key, value string) *MigrateReindex { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MigrateReindex) ErrorTrace(errortrace bool) *MigrateReindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *MigrateReindex) FilterPath(filterpaths ...string) *MigrateReindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MigrateReindex) Human(human bool) *MigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MigrateReindex) Pretty(pretty bool) *MigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Reindex mode. Currently only 'upgrade' is supported. +// API name: mode +func (r *MigrateReindex) Mode(mode modeenum.ModeEnum) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Mode = mode + return r +} + +// The source index or data stream (only data streams are currently supported). +// API name: source +func (r *MigrateReindex) Source(source types.SourceIndexVariant) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.SourceIndexCaster() + + return r +} diff --git a/typedapi/indices/migratereindex/request.go b/typedapi/indices/migratereindex/request.go new file mode 100644 index 0000000000..cfd98f5a95 --- /dev/null +++ b/typedapi/indices/migratereindex/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package migratereindex + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L23-L37 +type Request = types.MigrateReindex + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewMigrateReindex() + + return r +} diff --git a/typedapi/indices/migratereindex/response.go b/typedapi/indices/migratereindex/response.go new file mode 100644 index 0000000000..922e8b16e4 --- /dev/null +++ b/typedapi/indices/migratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package migratereindex + +// Response holds the response body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_reindex/MigrateReindexResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/migratetodatastream/migrate_to_data_stream.go b/typedapi/indices/migratetodatastream/migrate_to_data_stream.go index dfa6fb992f..e23baa6a11 100644 --- a/typedapi/indices/migratetodatastream/migrate_to_data_stream.go +++ b/typedapi/indices/migratetodatastream/migrate_to_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Convert an index alias to a data stream. // Converts an index alias to a data stream. @@ -102,7 +102,7 @@ func NewMigrateToDataStreamFunc(tp elastictransport.Interface) NewMigrateToDataS // The indices for the alias become hidden backing indices for the stream. // The write index for the alias becomes the write index for the stream. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream func New(tp elastictransport.Interface) *MigrateToDataStream { r := &MigrateToDataStream{ transport: tp, diff --git a/typedapi/indices/migratetodatastream/response.go b/typedapi/indices/migratetodatastream/response.go index 31264acdd9..17012b74b3 100644 --- a/typedapi/indices/migratetodatastream/response.go +++ b/typedapi/indices/migratetodatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package migratetodatastream // Response holds the response body struct for the package migratetodatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/modifydatastream/modify_data_stream.go b/typedapi/indices/modifydatastream/modify_data_stream.go index 8246eb4977..34be6d2cd8 100644 --- a/typedapi/indices/modifydatastream/modify_data_stream.go +++ b/typedapi/indices/modifydatastream/modify_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update data streams. // Performs one or more data stream modification actions in a single atomic @@ -79,7 +79,7 @@ func NewModifyDataStreamFunc(tp elastictransport.Interface) NewModifyDataStream // Performs one or more data stream modification actions in a single atomic // operation. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream func New(tp elastictransport.Interface) *ModifyDataStream { r := &ModifyDataStream{ transport: tp, @@ -87,8 +87,6 @@ func New(tp elastictransport.Interface) *ModifyDataStream { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,10 +338,17 @@ func (r *ModifyDataStream) Pretty(pretty bool) *ModifyDataStream { return r } -// Actions Actions to perform. +// Actions to perform. // API name: actions -func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyAction) *ModifyDataStream { - r.req.Actions = actions +func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyActionVariant) *ModifyDataStream { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + r.req.Actions = append(r.req.Actions, *v.IndicesModifyActionCaster()) + + } return r } diff --git a/typedapi/indices/modifydatastream/request.go b/typedapi/indices/modifydatastream/request.go index e1c4a2f5ba..b52ee09640 100644 --- a/typedapi/indices/modifydatastream/request.go +++ b/typedapi/indices/modifydatastream/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package modifydatastream @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L45 type Request struct { // Actions Actions to perform. diff --git a/typedapi/indices/modifydatastream/response.go b/typedapi/indices/modifydatastream/response.go index 20a370e3c2..0751f8d109 100644 --- a/typedapi/indices/modifydatastream/response.go +++ b/typedapi/indices/modifydatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package modifydatastream // Response holds the response body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/open/open.go b/typedapi/indices/open/open.go index 3a17c09605..a1b0078496 100644 --- a/typedapi/indices/open/open.go +++ b/typedapi/indices/open/open.go @@ -16,10 +16,44 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Opens a closed index. +// Open a closed index. // For data streams, the API opens any closed backing indices. +// +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. 
+// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. package open import ( @@ -78,10 +112,44 @@ func NewOpenFunc(tp elastictransport.Interface) NewOpen { } } -// Opens a closed index. +// Open a closed index. // For data streams, the API opens any closed backing indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. 
+// This setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open func New(tp elastictransport.Interface) *Open { r := &Open{ transport: tp, diff --git a/typedapi/indices/open/response.go b/typedapi/indices/open/response.go index 91e3befff2..56ac75563c 100644 --- a/typedapi/indices/open/response.go +++ b/typedapi/indices/open/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package open // Response holds the response body struct for the package open // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/open/IndicesOpenResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/open/IndicesOpenResponse.ts#L20-L25 type Response struct { Acknowledged bool `json:"acknowledged"` ShardsAcknowledged bool `json:"shards_acknowledged"` diff --git a/typedapi/indices/promotedatastream/promote_data_stream.go b/typedapi/indices/promotedatastream/promote_data_stream.go index 64eec75d85..1ad07d351f 100644 --- a/typedapi/indices/promotedatastream/promote_data_stream.go +++ 
b/typedapi/indices/promotedatastream/promote_data_stream.go @@ -16,10 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Promotes a data stream from a replicated data stream managed by CCR to a -// regular data stream +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. package promotedatastream import ( @@ -77,10 +94,27 @@ func NewPromoteDataStreamFunc(tp elastictransport.Interface) NewPromoteDataStrea } } -// Promotes a data stream from a replicated data stream managed by CCR to a -// regular data stream +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. 
+// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream func New(tp elastictransport.Interface) *PromoteDataStream { r := &PromoteDataStream{ transport: tp, diff --git a/typedapi/indices/promotedatastream/response.go b/typedapi/indices/promotedatastream/response.go index e646931282..2f060601cc 100644 --- a/typedapi/indices/promotedatastream/response.go +++ b/typedapi/indices/promotedatastream/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package promotedatastream @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package promotedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/putalias/put_alias.go b/typedapi/indices/putalias/put_alias.go index 1e8b0c48cf..942db1eeb8 100644 --- a/typedapi/indices/putalias/put_alias.go +++ b/typedapi/indices/putalias/put_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update an alias. // Adds a data stream or index to an alias. @@ -90,7 +90,7 @@ func NewPutAliasFunc(tp elastictransport.Interface) NewPutAlias { // Create or update an alias. // Adds a data stream or index to an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias func New(tp elastictransport.Interface) *PutAlias { r := &PutAlias{ transport: tp, @@ -98,8 +98,6 @@ func New(tp elastictransport.Interface) *PutAlias { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -420,26 +418,35 @@ func (r *PutAlias) Pretty(pretty bool) *PutAlias { return r } -// Filter Query used to limit documents the alias can access. +// Query used to limit documents the alias can access. // API name: filter -func (r *PutAlias) Filter(filter *types.Query) *PutAlias { +func (r *PutAlias) Filter(filter types.QueryVariant) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// IndexRouting Value used to route indexing operations to a specific shard. +// Value used to route indexing operations to a specific shard. // If specified, this overwrites the `routing` value for indexing operations. // Data stream aliases don’t support this parameter. // API name: index_routing func (r *PutAlias) IndexRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexRouting = &routing return r } -// IsWriteIndex If `true`, sets the write index or data stream for the alias. +// If `true`, sets the write index or data stream for the alias. // If an alias points to multiple indices or data streams and `is_write_index` // isn’t set, the alias rejects write requests. 
// If an index alias points to one index and `is_write_index` isn’t set, the @@ -448,25 +455,40 @@ func (r *PutAlias) IndexRouting(routing string) *PutAlias { // alias points to one data stream. // API name: is_write_index func (r *PutAlias) IsWriteIndex(iswriteindex bool) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsWriteIndex = &iswriteindex return r } -// Routing Value used to route indexing and search operations to a specific shard. +// Value used to route indexing and search operations to a specific shard. // Data stream aliases don’t support this parameter. // API name: routing func (r *PutAlias) Routing(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Routing = &routing return r } -// SearchRouting Value used to route search operations to a specific shard. +// Value used to route search operations to a specific shard. // If specified, this overwrites the `routing` value for search operations. // Data stream aliases don’t support this parameter. // API name: search_routing func (r *PutAlias) SearchRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SearchRouting = &routing return r diff --git a/typedapi/indices/putalias/request.go b/typedapi/indices/putalias/request.go index a819f905bf..dbd96497a5 100644 --- a/typedapi/indices/putalias/request.go +++ b/typedapi/indices/putalias/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putalias @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L103 type Request struct { // Filter Query used to limit documents the alias can access. diff --git a/typedapi/indices/putalias/response.go b/typedapi/indices/putalias/response.go index 9ff89d7f94..1331287f27 100644 --- a/typedapi/indices/putalias/response.go +++ b/typedapi/indices/putalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putalias // Response holds the response body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/putdatalifecycle/put_data_lifecycle.go b/typedapi/indices/putdatalifecycle/put_data_lifecycle.go index 2219eb190c..87f93fdcdd 100644 --- a/typedapi/indices/putdatalifecycle/put_data_lifecycle.go +++ b/typedapi/indices/putdatalifecycle/put_data_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. @@ -86,7 +86,7 @@ func NewPutDataLifecycleFunc(tp elastictransport.Interface) NewPutDataLifecycle // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-put-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle func New(tp elastictransport.Interface) *PutDataLifecycle { r := &PutDataLifecycle{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *PutDataLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -189,6 +187,12 @@ func (r *PutDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, erro req.Header = r.headers.Clone() + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + if req.Header.Get("Accept") == "" { req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") } @@ -392,32 +396,46 @@ func (r *PutDataLifecycle) Pretty(pretty bool) *PutDataLifecycle { return r } -// DataRetention If 
defined, every document added to this data stream will be stored at least +// If defined, every document added to this data stream will be stored at least // for this time frame. // Any time after this duration the document could be deleted. // When empty, every document in this data stream will be stored indefinitely. // API name: data_retention -func (r *PutDataLifecycle) DataRetention(duration types.Duration) *PutDataLifecycle { - r.req.DataRetention = duration +func (r *PutDataLifecycle) DataRetention(duration types.DurationVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataRetention = *duration.DurationCaster() return r } -// Downsampling The downsampling configuration to execute for the managed backing index after +// The downsampling configuration to execute for the managed backing index after // rollover. // API name: downsampling -func (r *PutDataLifecycle) Downsampling(downsampling *types.DataStreamLifecycleDownsampling) *PutDataLifecycle { +func (r *PutDataLifecycle) Downsampling(downsampling types.DataStreamLifecycleDownsamplingVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Downsampling = downsampling + r.req.Downsampling = downsampling.DataStreamLifecycleDownsamplingCaster() return r } -// Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this +// If defined, it turns data stream lifecycle on/off (`true`/`false`) for this // data stream. A data stream lifecycle // that's disabled (enabled: `false`) will have no effect on the data stream. 
// API name: enabled func (r *PutDataLifecycle) Enabled(enabled bool) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Enabled = &enabled return r diff --git a/typedapi/indices/putdatalifecycle/request.go b/typedapi/indices/putdatalifecycle/request.go index ee7b5a05ab..ddf025ca06 100644 --- a/typedapi/indices/putdatalifecycle/request.go +++ b/typedapi/indices/putdatalifecycle/request.go @@ -16,22 +16,98 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdatalifecycle import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L67 -type Request = types.DataStreamLifecycle +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L93 +type Request struct { + + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention types.Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. 
+ Downsampling *types.DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` +} // NewRequest returns a Request func NewRequest() *Request { - r := types.NewDataStreamLifecycle() + r := &Request{} return r } + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdatalifecycle request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return fmt.Errorf("%s | %w", "Downsampling", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} diff --git a/typedapi/indices/putdatalifecycle/response.go b/typedapi/indices/putdatalifecycle/response.go index 0b431000ef..b55ce3d4b8 100644 --- a/typedapi/indices/putdatalifecycle/response.go +++ b/typedapi/indices/putdatalifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdatalifecycle // Response holds the response body struct for the package putdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/putindextemplate/put_index_template.go b/typedapi/indices/putindextemplate/put_index_template.go index f1d97fb916..7257e47565 100644 --- a/typedapi/indices/putindextemplate/put_index_template.go +++ b/typedapi/indices/putindextemplate/put_index_template.go @@ -16,11 +16,54 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. 
+// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. +// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. +// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. 
package putindextemplate import ( @@ -87,7 +130,50 @@ func NewPutIndexTemplateFunc(tp elastictransport.Interface) NewPutIndexTemplate // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. +// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. +// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. 
+// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template func New(tp elastictransport.Interface) *PutIndexTemplate { r := &PutIndexTemplate{ transport: tp, @@ -95,8 +181,6 @@ func New(tp elastictransport.Interface) *PutIndexTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,7 +471,7 @@ func (r *PutIndexTemplate) Pretty(pretty bool) *PutIndexTemplate { return r } -// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// This setting overrides the value of the `action.auto_create_index` cluster // setting. // If set to `true` in a template, then indices can be automatically created // using that template even if auto-creation of indices is disabled via @@ -396,73 +480,114 @@ func (r *PutIndexTemplate) Pretty(pretty bool) *PutIndexTemplate { // always be explicitly created, and may never be automatically created. 
// API name: allow_auto_create func (r *PutIndexTemplate) AllowAutoCreate(allowautocreate bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowAutoCreate = &allowautocreate return r } -// ComposedOf An ordered list of component template names. +// An ordered list of component template names. // Component templates are merged in the order specified, meaning that the last // component template specified has the highest precedence. // API name: composed_of func (r *PutIndexTemplate) ComposedOf(composedofs ...string) *PutIndexTemplate { - r.req.ComposedOf = composedofs + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + } return r } -// DataStream If this object is included, the template is used to create data streams and +// If this object is included, the template is used to create data streams and // their backing indices. // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. // API name: data_stream -func (r *PutIndexTemplate) DataStream(datastream *types.DataStreamVisibility) *PutIndexTemplate { +func (r *PutIndexTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataStream = datastream + r.req.DataStream = datastream.DataStreamVisibilityCaster() return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. 
// API name: deprecated func (r *PutIndexTemplate) Deprecated(deprecated bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when +// The configuration option ignore_missing_component_templates can be used when // an index template // references a component template that might not exist // API name: ignore_missing_component_templates func (r *PutIndexTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *PutIndexTemplate { - r.req.IgnoreMissingComponentTemplates = ignoremissingcomponenttemplates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + } return r } -// IndexPatterns Name of the index template to create. +// Name of the index template to create. // API name: index_patterns func (r *PutIndexTemplate) IndexPatterns(indices ...string) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = indices return r } -// Meta_ Optional user metadata about the index template. -// May have any contents. -// This map is not automatically generated by Elasticsearch. +// Optional user metadata about the index template. +// It may have any contents. +// It is not automatically generated or used by Elasticsearch. +// This user-defined object is stored in the cluster state, so keeping it short +// is preferable +// To unset the metadata, replace the template without specifying it. 
// API name: _meta -func (r *PutIndexTemplate) Meta_(metadata types.Metadata) *PutIndexTemplate { - r.req.Meta_ = metadata +func (r *PutIndexTemplate) Meta_(metadata types.MetadataVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Priority Priority to determine index template precedence when a new data stream or +// Priority to determine index template precedence when a new data stream or // index is created. // The index template with the highest priority is chosen. // If no priority is specified the template is treated as though it is of @@ -470,27 +595,43 @@ func (r *PutIndexTemplate) Meta_(metadata types.Metadata) *PutIndexTemplate { // This number is not automatically generated by Elasticsearch. // API name: priority func (r *PutIndexTemplate) Priority(priority int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Priority = &priority return r } -// Template Template to be applied. +// Template to be applied. // It may optionally include an `aliases`, `mappings`, or `settings` // configuration. // API name: template -func (r *PutIndexTemplate) Template(template *types.IndexTemplateMapping) *PutIndexTemplate { +func (r *PutIndexTemplate) Template(template types.IndexTemplateMappingVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = template + r.req.Template = template.IndexTemplateMappingCaster() return r } -// Version Version number used to manage index templates externally. +// Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. +// External systems can use these version numbers to simplify template +// management. 
+// To unset a version, replace the template without specifying one. // API name: version func (r *PutIndexTemplate) Version(versionnumber int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/putindextemplate/request.go b/typedapi/indices/putindextemplate/request.go index b9715c1534..ca855e72ea 100644 --- a/typedapi/indices/putindextemplate/request.go +++ b/typedapi/indices/putindextemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putindextemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L157 type Request struct { // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster @@ -65,8 +65,11 @@ type Request struct { // IndexPatterns Name of the index template to create. IndexPatterns []string `json:"index_patterns,omitempty"` // Meta_ Optional user metadata about the index template. - // May have any contents. - // This map is not automatically generated by Elasticsearch. + // It may have any contents. + // It is not automatically generated or used by Elasticsearch. 
+ // This user-defined object is stored in the cluster state, so keeping it short + // is preferable + // To unset the metadata, replace the template without specifying it. Meta_ types.Metadata `json:"_meta,omitempty"` // Priority Priority to determine index template precedence when a new data stream or // index is created. @@ -81,6 +84,9 @@ type Request struct { Template *types.IndexTemplateMapping `json:"template,omitempty"` // Version Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. + // External systems can use these version numbers to simplify template + // management. + // To unset a version, replace the template without specifying one. Version *int64 `json:"version,omitempty"` } diff --git a/typedapi/indices/putindextemplate/response.go b/typedapi/indices/putindextemplate/response.go index 3d5cfa1840..0557eea785 100644 --- a/typedapi/indices/putindextemplate/response.go +++ b/typedapi/indices/putindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putindextemplate // Response holds the response body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/putmapping/put_mapping.go b/typedapi/indices/putmapping/put_mapping.go index 0deab9fdb4..0f9970cc94 100644 --- a/typedapi/indices/putmapping/put_mapping.go +++ b/typedapi/indices/putmapping/put_mapping.go @@ -16,13 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update field mappings. -// Adds new fields to an existing data stream or index. -// You can also use this API to change the search settings of existing fields. +// Add new fields to an existing data stream or index. +// You can also use this API to change the search settings of existing fields +// and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. +// +// **Add multi-fields to an existing field** +// +// Multi-fields let you index the same field in different ways. +// You can use this API to update the fields mapping parameter and enable +// multi-fields for an existing field. +// WARNING: If an index (or data stream) contains documents when you add a +// multi-field, those documents will not have values for the new multi-field. +// You can populate the new multi-field with the update by query API. +// +// **Change supported mapping parameters for an existing field** +// +// The documentation for each mapping parameter indicates whether you can update +// it for an existing field using this API. +// For example, you can use the update mapping API to update the `ignore_above` +// parameter. +// +// **Change the mapping of an existing field** +// +// Except for supported mapping parameters, you can't change the mapping or +// field type of an existing field. 
+// Changing an existing field could invalidate data that's already indexed. +// +// If you need to change the mapping of a field in a data stream's backing +// indices, refer to documentation about modifying data streams. +// If you need to change the mapping of a field in other indices, create a new +// index with the correct mapping and reindex your data into that index. +// +// **Rename a field** +// +// Renaming a field would invalidate data already indexed under the old field +// name. +// Instead, add an alias field to create an alternate field name. package putmapping import ( @@ -88,12 +122,46 @@ func NewPutMappingFunc(tp elastictransport.Interface) NewPutMapping { } // Update field mappings. -// Adds new fields to an existing data stream or index. -// You can also use this API to change the search settings of existing fields. +// Add new fields to an existing data stream or index. +// You can also use this API to change the search settings of existing fields +// and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html +// **Add multi-fields to an existing field** +// +// Multi-fields let you index the same field in different ways. +// You can use this API to update the fields mapping parameter and enable +// multi-fields for an existing field. +// WARNING: If an index (or data stream) contains documents when you add a +// multi-field, those documents will not have values for the new multi-field. +// You can populate the new multi-field with the update by query API. +// +// **Change supported mapping parameters for an existing field** +// +// The documentation for each mapping parameter indicates whether you can update +// it for an existing field using this API. +// For example, you can use the update mapping API to update the `ignore_above` +// parameter. 
+// +// **Change the mapping of an existing field** +// +// Except for supported mapping parameters, you can't change the mapping or +// field type of an existing field. +// Changing an existing field could invalidate data that's already indexed. +// +// If you need to change the mapping of a field in a data stream's backing +// indices, refer to documentation about modifying data streams. +// If you need to change the mapping of a field in other indices, create a new +// index with the correct mapping and reindex your data into that index. +// +// **Rename a field** +// +// Renaming a field would invalidate data already indexed under the old field +// name. +// Instead, add an alias field to create an alternate field name. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping func New(tp elastictransport.Interface) *PutMapping { r := &PutMapping{ transport: tp, @@ -101,8 +169,6 @@ func New(tp elastictransport.Interface) *PutMapping { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -432,102 +498,170 @@ func (r *PutMapping) Pretty(pretty bool) *PutMapping { return r } -// DateDetection Controls whether dynamic date detection is enabled. +// Controls whether dynamic date detection is enabled. // API name: date_detection func (r *PutMapping) DateDetection(datedetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DateDetection = &datedetection return r } -// Dynamic Controls whether new fields are added dynamically. +// Controls whether new fields are added dynamically. 
// API name: dynamic func (r *PutMapping) Dynamic(dynamic dynamicmapping.DynamicMapping) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Dynamic = &dynamic - return r } -// DynamicDateFormats If date detection is enabled then new string fields are checked +// If date detection is enabled then new string fields are checked // against 'dynamic_date_formats' and if the value matches then // a new date field is added instead of string. // API name: dynamic_date_formats func (r *PutMapping) DynamicDateFormats(dynamicdateformats ...string) *PutMapping { - r.req.DynamicDateFormats = dynamicdateformats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range dynamicdateformats { + r.req.DynamicDateFormats = append(r.req.DynamicDateFormats, v) + + } return r } -// DynamicTemplates Specify dynamic templates for the mapping. +// Specify dynamic templates for the mapping. // API name: dynamic_templates func (r *PutMapping) DynamicTemplates(dynamictemplates []map[string]types.DynamicTemplate) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DynamicTemplates = dynamictemplates return r } -// FieldNames_ Control whether field names are enabled for the index. +// Control whether field names are enabled for the index. // API name: _field_names -func (r *PutMapping) FieldNames_(fieldnames_ *types.FieldNamesField) *PutMapping { +func (r *PutMapping) FieldNames_(fieldnames_ types.FieldNamesFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.FieldNames_ = fieldnames_ + r.req.FieldNames_ = fieldnames_.FieldNamesFieldCaster() return r } -// Meta_ A mapping type can have custom meta data associated with it. 
These are +// A mapping type can have custom meta data associated with it. These are // not used at all by Elasticsearch, but can be used to store // application-specific metadata. // API name: _meta -func (r *PutMapping) Meta_(metadata types.Metadata) *PutMapping { - r.req.Meta_ = metadata +func (r *PutMapping) Meta_(metadata types.MetadataVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// NumericDetection Automatically map strings into numeric data types for all fields. +// Automatically map strings into numeric data types for all fields. // API name: numeric_detection func (r *PutMapping) NumericDetection(numericdetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumericDetection = &numericdetection return r } -// Properties Mapping for a field. For new fields, this mapping can include: +// Mapping for a field. For new fields, this mapping can include: // // - Field name // - Field data type // - Mapping parameters // API name: properties func (r *PutMapping) Properties(properties map[string]types.Property) *PutMapping { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Properties = properties + return r +} + +func (r *PutMapping) AddProperty(key string, value types.PropertyVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Property + if r.req.Properties == nil { + r.req.Properties = make(map[string]types.Property) + } else { + tmp = r.req.Properties + } + tmp[key] = *value.PropertyCaster() + + r.req.Properties = tmp return r } -// Routing_ Enable making a routing value required on indexed documents. 
+// Enable making a routing value required on indexed documents. // API name: _routing -func (r *PutMapping) Routing_(routing_ *types.RoutingField) *PutMapping { +func (r *PutMapping) Routing_(routing_ types.RoutingFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Routing_ = routing_ + r.req.Routing_ = routing_.RoutingFieldCaster() return r } -// Runtime Mapping of runtime fields for the index. +// Mapping of runtime fields for the index. // API name: runtime -func (r *PutMapping) Runtime(runtimefields types.RuntimeFields) *PutMapping { - r.req.Runtime = runtimefields +func (r *PutMapping) Runtime(runtimefields types.RuntimeFieldsVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Runtime = *runtimefields.RuntimeFieldsCaster() return r } -// Source_ Control whether the _source field is enabled on the index. +// Control whether the _source field is enabled on the index. // API name: _source -func (r *PutMapping) Source_(source_ *types.SourceField) *PutMapping { +func (r *PutMapping) Source_(source_ types.SourceFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source_ = source_ + r.req.Source_ = source_.SourceFieldCaster() return r } diff --git a/typedapi/indices/putmapping/request.go b/typedapi/indices/putmapping/request.go index 52acd828cd..18b79aa9b4 100644 --- a/typedapi/indices/putmapping/request.go +++ b/typedapi/indices/putmapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putmapping @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L41-L149 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L41-L181 type Request struct { // DateDetection Controls whether dynamic date detection is enabled. @@ -129,24 +129,8 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "dynamic_templates": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]types.DynamicTemplate, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - s.DynamicTemplates = append(s.DynamicTemplates, o) - case '[': - o := make([]map[string]types.DynamicTemplate, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - s.DynamicTemplates = o + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return fmt.Errorf("%s | %w", "DynamicTemplates", err) } case "_field_names": @@ -192,301 +176,313 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "binary": oo := types.NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := types.NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "{dynamic_type}": oo := types.NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := types.NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := types.NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := types.NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := types.NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := types.NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := types.NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := types.NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := types.NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := types.NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := types.NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"date_nanos": oo := types.NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := types.NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := types.NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := types.NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := types.NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := types.NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := types.NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := types.NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := types.NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := types.NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := types.NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "constant_keyword": oo := types.NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := types.NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := types.NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := types.NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := types.NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := types.NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := types.NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := types.NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := types.NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := types.NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := types.NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"byte": oo := types.NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := types.NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := types.NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := types.NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := types.NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := types.NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := types.NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := types.NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := types.NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := types.NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := types.NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "float_range": oo := types.NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := types.NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := types.NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := types.NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := types.NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(types.Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(types.Property) | %w", err) } s.Properties[key] = oo } diff --git a/typedapi/indices/putmapping/response.go b/typedapi/indices/putmapping/response.go index 97b2452e5f..2b6d5d0fdf 100644 --- a/typedapi/indices/putmapping/response.go +++ b/typedapi/indices/putmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/putsettings/put_settings.go b/typedapi/indices/putsettings/put_settings.go index bc2b901fd0..7e9af02723 100644 --- a/typedapi/indices/putsettings/put_settings.go +++ b/typedapi/indices/putsettings/put_settings.go @@ -16,11 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update index settings. -// Changes dynamic index settings in real time. For data streams, index setting -// changes are applied to all backing indices by default. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index module documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. 
+// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. +// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. +// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. +// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. package putsettings import ( @@ -84,10 +106,32 @@ func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { } // Update index settings. -// Changes dynamic index settings in real time. For data streams, index setting -// changes are applied to all backing indices by default. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index module documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. +// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. +// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. 
+// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. +// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings func New(tp elastictransport.Interface) *PutSettings { r := &PutSettings{ transport: tp, @@ -95,8 +139,6 @@ func New(tp elastictransport.Interface) *PutSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -441,48 +483,71 @@ func (r *PutSettings) Pretty(pretty bool) *PutSettings { } // API name: analysis -func (r *PutSettings) Analysis(analysis *types.IndexSettingsAnalysis) *PutSettings { +func (r *PutSettings) Analysis(analysis types.IndexSettingsAnalysisVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = analysis + r.req.Analysis = analysis.IndexSettingsAnalysisCaster() return r } -// Analyze Settings to define analyzers, tokenizers, token filters and character +// Settings to define analyzers, tokenizers, token filters and character // filters. 
// API name: analyze -func (r *PutSettings) Analyze(analyze *types.SettingsAnalyze) *PutSettings { +func (r *PutSettings) Analyze(analyze types.SettingsAnalyzeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analyze = analyze + r.req.Analyze = analyze.SettingsAnalyzeCaster() return r } // API name: auto_expand_replicas -func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas string) *PutSettings { +func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas any) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AutoExpandReplicas = &autoexpandreplicas + r.req.AutoExpandReplicas = autoexpandreplicas return r } // API name: blocks -func (r *PutSettings) Blocks(blocks *types.IndexSettingBlocks) *PutSettings { +func (r *PutSettings) Blocks(blocks types.IndexSettingBlocksVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Blocks = blocks + r.req.Blocks = blocks.IndexSettingBlocksCaster() return r } // API name: check_on_startup func (r *PutSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CheckOnStartup = &checkonstartup - return r } // API name: codec func (r *PutSettings) Codec(codec string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Codec = &codec @@ -490,21 +555,36 @@ func (r *PutSettings) Codec(codec string) *PutSettings { } // API name: creation_date -func (r *PutSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillis) *PutSettings { - r.req.CreationDate = stringifiedepochtimeunitmillis +func (r *PutSettings) 
CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillisVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDate = *stringifiedepochtimeunitmillis.StringifiedEpochTimeUnitMillisCaster() return r } // API name: creation_date_string -func (r *PutSettings) CreationDateString(datetime types.DateTime) *PutSettings { - r.req.CreationDateString = datetime +func (r *PutSettings) CreationDateString(datetime types.DateTimeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDateString = *datetime.DateTimeCaster() return r } // API name: default_pipeline func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DefaultPipeline = &pipelinename return r @@ -512,6 +592,11 @@ func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings { // API name: final_pipeline func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FinalPipeline = &pipelinename return r @@ -519,92 +604,162 @@ func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings { // API name: format func (r *PutSettings) Format(format string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Format = format return r } // API name: gc_deletes -func (r *PutSettings) GcDeletes(duration types.Duration) *PutSettings { - r.req.GcDeletes = duration +func (r *PutSettings) GcDeletes(duration types.DurationVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + 
r.req.GcDeletes = *duration.DurationCaster() return r } // API name: hidden func (r *PutSettings) Hidden(hidden string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Hidden = hidden return r } // API name: highlight -func (r *PutSettings) Highlight(highlight *types.SettingsHighlight) *PutSettings { +func (r *PutSettings) Highlight(highlight types.SettingsHighlightVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.SettingsHighlightCaster() return r } // API name: index -func (r *PutSettings) Index(index *types.IndexSettings) *PutSettings { +func (r *PutSettings) Index(index types.IndexSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Index = index + r.req.Index = index.IndexSettingsCaster() return r } // API name: IndexSettings func (r *PutSettings) IndexSettings(indexsettings map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexSettings = indexsettings + return r +} + +func (r *PutSettings) AddIndexSetting(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + tmp[key] = value + + r.req.IndexSettings = tmp return r } -// IndexingPressure Configure indexing back pressure limits. +// Configure indexing back pressure limits. 
// API name: indexing_pressure -func (r *PutSettings) IndexingPressure(indexingpressure *types.IndicesIndexingPressure) *PutSettings { +func (r *PutSettings) IndexingPressure(indexingpressure types.IndicesIndexingPressureVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexingPressure = indexingpressure + r.req.IndexingPressure = indexingpressure.IndicesIndexingPressureCaster() return r } // API name: indexing.slowlog -func (r *PutSettings) IndexingSlowlog(indexingslowlog *types.IndexingSlowlogSettings) *PutSettings { +func (r *PutSettings) IndexingSlowlog(indexingslowlog types.IndexingSlowlogSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexingSlowlog = indexingslowlog + r.req.IndexingSlowlog = indexingslowlog.IndexingSlowlogSettingsCaster() return r } // API name: lifecycle -func (r *PutSettings) Lifecycle(lifecycle *types.IndexSettingsLifecycle) *PutSettings { +func (r *PutSettings) Lifecycle(lifecycle types.IndexSettingsLifecycleVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Lifecycle = lifecycle + r.req.Lifecycle = lifecycle.IndexSettingsLifecycleCaster() return r } // API name: load_fixed_bitset_filters_eagerly func (r *PutSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly return r } -// Mapping Enable or disable dynamic mapping for an index. +// Enable or disable dynamic mapping for an index. 
// API name: mapping -func (r *PutSettings) Mapping(mapping *types.MappingLimitSettings) *PutSettings { +func (r *PutSettings) Mapping(mapping types.MappingLimitSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mapping = mapping + r.req.Mapping = mapping.MappingLimitSettingsCaster() return r } // API name: max_docvalue_fields_search func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch return r @@ -612,6 +767,11 @@ func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutS // API name: max_inner_result_window func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxInnerResultWindow = &maxinnerresultwindow return r @@ -619,6 +779,11 @@ func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSetting // API name: max_ngram_diff func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNgramDiff = &maxngramdiff return r @@ -626,6 +791,11 @@ func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { // API name: max_refresh_listeners func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRefreshListeners = &maxrefreshlisteners return r @@ -633,6 +803,11 @@ func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings // API name: max_regex_length func (r *PutSettings) 
MaxRegexLength(maxregexlength int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRegexLength = &maxregexlength return r @@ -640,6 +815,11 @@ func (r *PutSettings) MaxRegexLength(maxregexlength int) *PutSettings { // API name: max_rescore_window func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRescoreWindow = &maxrescorewindow return r @@ -647,6 +827,11 @@ func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { // API name: max_result_window func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxResultWindow = &maxresultwindow return r @@ -654,6 +839,11 @@ func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { // API name: max_script_fields func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxScriptFields = &maxscriptfields return r @@ -661,6 +851,11 @@ func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { // API name: max_shingle_diff func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxShingleDiff = &maxshinglediff return r @@ -668,6 +863,11 @@ func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { // API name: max_slices_per_scroll func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxSlicesPerScroll = 
&maxslicesperscroll return r @@ -675,21 +875,34 @@ func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { // API name: max_terms_count func (r *PutSettings) MaxTermsCount(maxtermscount int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxTermsCount = &maxtermscount return r } // API name: merge -func (r *PutSettings) Merge(merge *types.Merge) *PutSettings { +func (r *PutSettings) Merge(merge types.MergeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Merge = merge + r.req.Merge = merge.MergeCaster() return r } // API name: mode func (r *PutSettings) Mode(mode string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Mode = &mode @@ -698,6 +911,11 @@ func (r *PutSettings) Mode(mode string) *PutSettings { // API name: number_of_replicas func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfReplicas = numberofreplicas return r @@ -705,6 +923,11 @@ func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { // API name: number_of_routing_shards func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfRoutingShards = &numberofroutingshards return r @@ -712,6 +935,11 @@ func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSetti // API name: number_of_shards func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfShards 
= numberofshards return r @@ -719,6 +947,11 @@ func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { // API name: priority func (r *PutSettings) Priority(priority string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Priority = priority return r @@ -726,133 +959,224 @@ func (r *PutSettings) Priority(priority string) *PutSettings { // API name: provided_name func (r *PutSettings) ProvidedName(name string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ProvidedName = &name return r } // API name: queries -func (r *PutSettings) Queries(queries *types.Queries) *PutSettings { +func (r *PutSettings) Queries(queries types.QueriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Queries = queries + r.req.Queries = queries.QueriesCaster() return r } // API name: query_string -func (r *PutSettings) QueryString(querystring *types.SettingsQueryString) *PutSettings { +func (r *PutSettings) QueryString(querystring types.SettingsQueryStringVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.QueryString = querystring + r.req.QueryString = querystring.SettingsQueryStringCaster() return r } // API name: refresh_interval -func (r *PutSettings) RefreshInterval(duration types.Duration) *PutSettings { - r.req.RefreshInterval = duration +func (r *PutSettings) RefreshInterval(duration types.DurationVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshInterval = *duration.DurationCaster() return r } // API name: routing -func (r *PutSettings) Routing(routing *types.IndexRouting) *PutSettings { +func (r *PutSettings) Routing(routing 
types.IndexRoutingVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Routing = routing + r.req.Routing = routing.IndexRoutingCaster() return r } // API name: routing_partition_size -func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.Stringifiedinteger) *PutSettings { - r.req.RoutingPartitionSize = stringifiedinteger +func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.StringifiedintegerVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RoutingPartitionSize = *stringifiedinteger.StringifiedintegerCaster() return r } // API name: routing_path func (r *PutSettings) RoutingPath(routingpaths ...string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoutingPath = make([]string, len(routingpaths)) r.req.RoutingPath = routingpaths return r } // API name: search -func (r *PutSettings) Search(search *types.SettingsSearch) *PutSettings { +func (r *PutSettings) Search(search types.SettingsSearchVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Search = search + r.req.Search = search.SettingsSearchCaster() return r } // API name: settings -func (r *PutSettings) Settings(settings *types.IndexSettings) *PutSettings { +func (r *PutSettings) Settings(settings types.IndexSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } -// Similarity Configure custom similarity settings to customize how search results are +// Configure custom similarity settings to customize how search results are // scored. 
// API name: similarity func (r *PutSettings) Similarity(similarity map[string]types.SettingsSimilarity) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Similarity = similarity + return r +} + +func (r *PutSettings) AddSimilarity(key string, value types.SettingsSimilarityVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.SettingsSimilarity + if r.req.Similarity == nil { + tmp = make(map[string]types.SettingsSimilarity) + } else { + tmp = r.req.Similarity + } + tmp[key] = *value.SettingsSimilarityCaster() + + r.req.Similarity = tmp return r } // API name: soft_deletes -func (r *PutSettings) SoftDeletes(softdeletes *types.SoftDeletes) *PutSettings { +func (r *PutSettings) SoftDeletes(softdeletes types.SoftDeletesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.SoftDeletes = softdeletes + r.req.SoftDeletes = softdeletes.SoftDeletesCaster() return r } // API name: sort -func (r *PutSettings) Sort(sort *types.IndexSegmentSort) *PutSettings { +func (r *PutSettings) Sort(sort types.IndexSegmentSortVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sort = sort + r.req.Sort = sort.IndexSegmentSortCaster() return r } -// Store The store module allows you to control how index data is stored and accessed +// The store module allows you to control how index data is stored and accessed // on disk. 
// API name: store -func (r *PutSettings) Store(store *types.Storage) *PutSettings { +func (r *PutSettings) Store(store types.StorageVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Store = store + r.req.Store = store.StorageCaster() return r } // API name: time_series -func (r *PutSettings) TimeSeries(timeseries *types.IndexSettingsTimeSeries) *PutSettings { +func (r *PutSettings) TimeSeries(timeseries types.IndexSettingsTimeSeriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.TimeSeries = timeseries + r.req.TimeSeries = timeseries.IndexSettingsTimeSeriesCaster() return r } // API name: top_metrics_max_size func (r *PutSettings) TopMetricsMaxSize(topmetricsmaxsize int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TopMetricsMaxSize = &topmetricsmaxsize return r } // API name: translog -func (r *PutSettings) Translog(translog *types.Translog) *PutSettings { +func (r *PutSettings) Translog(translog types.TranslogVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Translog = translog + r.req.Translog = translog.TranslogCaster() return r } // API name: uuid func (r *PutSettings) Uuid(uuid string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Uuid = &uuid return r @@ -860,15 +1184,24 @@ func (r *PutSettings) Uuid(uuid string) *PutSettings { // API name: verified_before_close func (r *PutSettings) VerifiedBeforeClose(verifiedbeforeclose string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.VerifiedBeforeClose = verifiedbeforeclose return r } // API 
name: version -func (r *PutSettings) Version(version *types.IndexVersioning) *PutSettings { +func (r *PutSettings) Version(version types.IndexVersioningVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Version = version + r.req.Version = version.IndexVersioningCaster() return r } diff --git a/typedapi/indices/putsettings/request.go b/typedapi/indices/putsettings/request.go index f789b83567..ad101899de 100644 --- a/typedapi/indices/putsettings/request.go +++ b/typedapi/indices/putsettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsettings @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L118 type Request = types.IndexSettings // NewRequest returns a Request diff --git a/typedapi/indices/putsettings/response.go b/typedapi/indices/putsettings/response.go index 5602ccda75..b7135fa088 100644 --- a/typedapi/indices/putsettings/response.go +++ b/typedapi/indices/putsettings/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsettings // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/puttemplate/put_template.go b/typedapi/indices/puttemplate/put_template.go index 54eb786831..a465d99b4a 100644 --- a/typedapi/indices/puttemplate/put_template.go +++ b/typedapi/indices/puttemplate/put_template.go @@ -16,11 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// Elasticsearch applies templates to new indices based on an index pattern that +// matches the index name. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// Composable templates always take precedence over legacy templates. +// If no composable template matches a new index, matching legacy templates are +// applied according to their order. 
+// +// Index templates are only applied during index creation. +// Changes to index templates do not affect existing indices. +// Settings and mappings specified in create index API requests override any +// settings or mappings specified in an index template. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Indices matching multiple templates** +// +// Multiple index templates can potentially match an index, in this case, both +// the settings and mappings are merged into the final configuration of the +// index. +// The order of the merging can be controlled using the order parameter, with +// lower order being applied first, and higher orders overriding them. +// NOTE: Multiple matching templates with the same order value will result in a +// non-deterministic merging order. package puttemplate import ( @@ -86,8 +115,37 @@ func NewPutTemplateFunc(tp elastictransport.Interface) NewPutTemplate { // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// Elasticsearch applies templates to new indices based on an index pattern that +// matches the index name. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// Composable templates always take precedence over legacy templates. +// If no composable template matches a new index, matching legacy templates are +// applied according to their order. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html +// Index templates are only applied during index creation. +// Changes to index templates do not affect existing indices. 
+// Settings and mappings specified in create index API requests override any +// settings or mappings specified in an index template. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Indices matching multiple templates** +// +// Multiple index templates can potentially match an index, in this case, both +// the settings and mappings are merged into the final configuration of the +// index. +// The order of the merging can be controlled using the order parameter, with +// lower order being applied first, and higher orders overriding them. +// NOTE: Multiple matching templates with the same order value will result in a +// non-deterministic merging order. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template func New(tp elastictransport.Interface) *PutTemplate { r := &PutTemplate{ transport: tp, @@ -95,8 +153,6 @@ func New(tp elastictransport.Interface) *PutTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -385,58 +441,103 @@ func (r *PutTemplate) Pretty(pretty bool) *PutTemplate { return r } -// Aliases Aliases for the index. +// Aliases for the index. 
// API name: aliases func (r *PutTemplate) Aliases(aliases map[string]types.Alias) *PutTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *PutTemplate) AddAlias(key string, value types.AliasVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + tmp = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// IndexPatterns Array of wildcard expressions used to match the names +// Array of wildcard expressions used to match the names // of indices during creation. // API name: index_patterns func (r *PutTemplate) IndexPatterns(indexpatterns ...string) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = make([]string, len(indexpatterns)) r.req.IndexPatterns = indexpatterns return r } -// Mappings Mapping for fields in the index. +// Mapping for fields in the index. // API name: mappings -func (r *PutTemplate) Mappings(mappings *types.TypeMapping) *PutTemplate { +func (r *PutTemplate) Mappings(mappings types.TypeMappingVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Order Order in which Elasticsearch applies this template if index +// Order in which Elasticsearch applies this template if index // matches multiple templates. // // Templates with lower 'order' values are merged first. Templates with higher // 'order' values are merged later, overriding templates with lower values. 
// API name: order func (r *PutTemplate) Order(order int) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Order = &order return r } -// Settings Configuration options for the index. +// Configuration options for the index. // API name: settings -func (r *PutTemplate) Settings(settings *types.IndexSettings) *PutTemplate { +func (r *PutTemplate) Settings(settings types.IndexSettingsVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } -// Version Version number used to manage index templates externally. This number +// Version number used to manage index templates externally. This number // is not automatically generated by Elasticsearch. +// To unset a version, replace the template without specifying one. // API name: version func (r *PutTemplate) Version(versionnumber int64) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/puttemplate/request.go b/typedapi/indices/puttemplate/request.go index 91b8224fae..529cfa2616 100644 --- a/typedapi/indices/puttemplate/request.go +++ b/typedapi/indices/puttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L123 type Request struct { // Aliases Aliases for the index. @@ -53,6 +53,7 @@ type Request struct { Settings *types.IndexSettings `json:"settings,omitempty"` // Version Version number used to manage index templates externally. This number // is not automatically generated by Elasticsearch. + // To unset a version, replace the template without specifying one. Version *int64 `json:"version,omitempty"` } diff --git a/typedapi/indices/puttemplate/response.go b/typedapi/indices/puttemplate/response.go index 667361907b..8d10e3d6ab 100644 --- a/typedapi/indices/puttemplate/response.go +++ b/typedapi/indices/puttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttemplate // Response holds the response body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/recovery/recovery.go b/typedapi/indices/recovery/recovery.go index 69309b98e5..a7d5799a43 100644 --- a/typedapi/indices/recovery/recovery.go +++ b/typedapi/indices/recovery/recovery.go @@ -16,12 +16,44 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about ongoing and completed shard recoveries for one or -// more indices. -// For data streams, the API returns information for the stream’s backing +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more // indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. +// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. 
+// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. package recovery import ( @@ -77,12 +109,44 @@ func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { } } -// Returns information about ongoing and completed shard recoveries for one or -// more indices. -// For data streams, the API returns information for the stream’s backing +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more // indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. 
+// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. +// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery func New(tp elastictransport.Interface) *Recovery { r := &Recovery{ transport: tp, diff --git a/typedapi/indices/recovery/response.go b/typedapi/indices/recovery/response.go index eb3da0a19b..c44729b858 100644 --- a/typedapi/indices/recovery/response.go +++ b/typedapi/indices/recovery/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L27 type Response map[string]types.RecoveryStatus diff --git a/typedapi/indices/refresh/refresh.go b/typedapi/indices/refresh/refresh.go index 6cb45989ef..6f594c7fac 100644 --- a/typedapi/indices/refresh/refresh.go +++ b/typedapi/indices/refresh/refresh.go @@ -16,13 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Refresh an index. // A refresh makes recent operations performed on one or more indices available // for search. // For data streams, the API runs the refresh operation on the stream’s backing // indices. +// +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. 
+// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. package refresh import ( @@ -85,7 +105,27 @@ func NewRefreshFunc(tp elastictransport.Interface) NewRefresh { // For data streams, the API runs the refresh operation on the stream’s backing // indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. +// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh func New(tp elastictransport.Interface) *Refresh { r := &Refresh{ transport: tp, diff --git a/typedapi/indices/refresh/response.go b/typedapi/indices/refresh/response.go index c1c2ad890c..6d994ad863 100644 --- a/typedapi/indices/refresh/response.go +++ b/typedapi/indices/refresh/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package refresh @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package refresh // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 type Response struct { Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } diff --git a/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go b/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go index dd86d45e79..69718899ba 100644 --- a/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go +++ b/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Reloads an index's search analyzers and their resources. +// Reload search analyzers. 
+// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. +// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. package reloadsearchanalyzers import ( @@ -77,9 +100,32 @@ func NewReloadSearchAnalyzersFunc(tp elastictransport.Interface) NewReloadSearch } } -// Reloads an index's search analyzers and their resources. +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. 
+// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers func New(tp elastictransport.Interface) *ReloadSearchAnalyzers { r := &ReloadSearchAnalyzers{ transport: tp, diff --git a/typedapi/indices/reloadsearchanalyzers/response.go b/typedapi/indices/reloadsearchanalyzers/response.go index ed3ae683b5..cac22df852 100644 --- a/typedapi/indices/reloadsearchanalyzers/response.go +++ b/typedapi/indices/reloadsearchanalyzers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reloadsearchanalyzers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reloadsearchanalyzers // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24 type Response struct { ReloadDetails []types.ReloadDetails `json:"reload_details"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/resolvecluster/resolve_cluster.go b/typedapi/indices/resolvecluster/resolve_cluster.go index 6ba5d83867..8d3405c75e 100644 --- a/typedapi/indices/resolvecluster/resolve_cluster.go +++ b/typedapi/indices/resolvecluster/resolve_cluster.go @@ -16,12 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Resolves the specified index expressions to return information about each -// cluster, including -// the local cluster, if included. -// Multiple patterns and remote clusters are supported. +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. 
+//
+// This endpoint is useful before doing a cross-cluster search in order to
+// determine which remote clusters should be included in a search.
+//
+// You use the same index expression with this endpoint as you would for
+// cross-cluster search.
+// Index and cluster exclusions are also supported with this endpoint.
+//
+// For each cluster in the index expression, information is returned about:
+//
+// * Whether the querying ("local") cluster is currently connected to each
+// remote cluster specified in the index expression. Note that this endpoint
+// actively attempts to contact the remote clusters, unlike the `remote/info`
+// endpoint.
+// * Whether each remote cluster is configured with `skip_unavailable` as `true`
+// or `false`.
+// * Whether there are any indices, aliases, or data streams on that cluster
+// that match the index expression.
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
+//
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if any errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. If it causes a
+// problem, you can
+// instead include an index expression like `*:*` to bypass the issue.
+// +// ## Advantages of using this endpoint before a cross-cluster search +// +// You may want to exclude a cluster or index from a search when: +// +// * A remote cluster is not currently connected and is configured with +// `skip_unavailable=false`. Running a cross-cluster search under those +// conditions will cause the entire search to fail. +// * A cluster has no matching indices, aliases or data streams for the index +// expression (or your user does not have permissions to search them). For +// example, suppose your index expression is `logs*,remote1:logs*` and the +// remote1 cluster has no indices, aliases or data streams that match `logs*`. +// In that case, that cluster will return no results from that cluster if you +// include it in a cross-cluster search. +// * The index expression (combined with any query parameters you specify) will +// likely cause an exception to be thrown when you do the search. In these +// cases, the "error" field in the `_resolve/cluster` response will be present. +// (This is also where security/permission errors will be shown.) +// * A remote cluster is an older version that does not support the feature you +// want to use in your search. +// +// ## Test availability of remote clusters +// +// The `remote/info` endpoint is commonly used to test whether the "local" +// cluster (the cluster being queried) is connected to its remote clusters, but +// it does not necessarily reflect whether the remote cluster is available or +// not. +// The remote cluster may be available, while the local cluster is not currently +// connected to it. +// +// You can use the `_resolve/cluster` API to attempt to reconnect to remote +// clusters. +// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +// The `connected` field in the response will indicate whether it was +// successful. +// If a connection was (re-)established, this will also cause the `remote/info` +// endpoint to now indicate a connected status. 
package resolvecluster import ( @@ -66,26 +144,102 @@ type ResolveCluster struct { } // NewResolveCluster type alias for index. -type NewResolveCluster func(name string) *ResolveCluster +type NewResolveCluster func() *ResolveCluster // NewResolveClusterFunc returns a new instance of ResolveCluster with the provided transport. // Used in the index of the library this allows to retrieve every apis in once place. func NewResolveClusterFunc(tp elastictransport.Interface) NewResolveCluster { - return func(name string) *ResolveCluster { + return func() *ResolveCluster { n := New(tp) - n._name(name) - return n } } -// Resolves the specified index expressions to return information about each -// cluster, including -// the local cluster, if included. -// Multiple patterns and remote clusters are supported. +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. +// +// This endpoint is useful before doing a cross-cluster search in order to +// determine which remote clusters should be included in a search. +// +// You use the same index expression with this endpoint as you would for +// cross-cluster search. +// Index and cluster exclusions are also supported with this endpoint. +// +// For each cluster in the index expression, information is returned about: +// +// * Whether the querying ("local") cluster is currently connected to each +// remote cluster specified in the index expression. Note that this endpoint +// actively attempts to contact the remote clusters, unlike the `remote/info` +// endpoint. +// * Whether each remote cluster is configured with `skip_unavailable` as `true` +// or `false`. 
+// * Whether there are any indices, aliases, or data streams on that cluster
+// that match the index expression.
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
 //
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-cluster-api.html
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if any errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. If it causes a
+// problem, you can
+// instead include an index expression like `*:*` to bypass the issue.
+//
+// ## Advantages of using this endpoint before a cross-cluster search
+//
+// You may want to exclude a cluster or index from a search when:
+//
+// * A remote cluster is not currently connected and is configured with
+// `skip_unavailable=false`. Running a cross-cluster search under those
+// conditions will cause the entire search to fail.
+// * A cluster has no matching indices, aliases or data streams for the index
+// expression (or your user does not have permissions to search them). For
+// example, suppose your index expression is `logs*,remote1:logs*` and the
+// remote1 cluster has no indices, aliases or data streams that match `logs*`.
+// In that case, that cluster will return no results from that cluster if you +// include it in a cross-cluster search. +// * The index expression (combined with any query parameters you specify) will +// likely cause an exception to be thrown when you do the search. In these +// cases, the "error" field in the `_resolve/cluster` response will be present. +// (This is also where security/permission errors will be shown.) +// * A remote cluster is an older version that does not support the feature you +// want to use in your search. +// +// ## Test availability of remote clusters +// +// The `remote/info` endpoint is commonly used to test whether the "local" +// cluster (the cluster being queried) is connected to its remote clusters, but +// it does not necessarily reflect whether the remote cluster is available or +// not. +// The remote cluster may be available, while the local cluster is not currently +// connected to it. +// +// You can use the `_resolve/cluster` API to attempt to reconnect to remote +// clusters. +// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +// The `connected` field in the response will indicate whether it was +// successful. +// If a connection was (re-)established, this will also cause the `remote/info` +// endpoint to now indicate a connected status. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster func New(tp elastictransport.Interface) *ResolveCluster { r := &ResolveCluster{ transport: tp, @@ -114,6 +268,13 @@ func (r *ResolveCluster) HttpRequest(ctx context.Context) (*http.Request, error) r.path.Scheme = "http" switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("cluster") + + method = http.MethodGet case r.paramSet == nameMask: path.WriteString("/") path.WriteString("_resolve") @@ -297,12 +458,16 @@ func (r *ResolveCluster) Header(key, value string) *ResolveCluster { return r } -// Name Comma-separated name(s) or index pattern(s) of the indices, aliases, and data -// streams to resolve. +// Name A comma-separated list of names or index patterns for the indices, aliases, +// and data streams to resolve. // Resources on remote clusters can be specified using the ``:`` // syntax. +// Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. +// If no index expression is specified, information about all remote clusters +// configured on the local cluster +// is returned without doing any index matching // API Name: name -func (r *ResolveCluster) _name(name string) *ResolveCluster { +func (r *ResolveCluster) Name(name string) *ResolveCluster { r.paramSet |= nameMask r.name = name @@ -310,11 +475,15 @@ func (r *ResolveCluster) _name(name string) *ResolveCluster { } // AllowNoIndices If false, the request returns an error if any wildcard expression, index -// alias, or _all value targets only missing +// alias, or `_all` value targets only missing // or closed indices. This behavior applies even if the request targets other // open indices. For example, a request -// targeting foo*,bar* returns an error if an index starts with foo but no index -// starts with bar. +// targeting `foo*,bar*` returns an error if an index starts with `foo` but no +// index starts with `bar`. 
+// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: allow_no_indices func (r *ResolveCluster) AllowNoIndices(allownoindices bool) *ResolveCluster { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -327,6 +496,10 @@ func (r *ResolveCluster) AllowNoIndices(allownoindices bool) *ResolveCluster { // wildcard expressions match hidden data streams. // Supports comma-separated values, such as `open,hidden`. // Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: expand_wildcards func (r *ResolveCluster) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ResolveCluster { tmp := []string{} @@ -338,8 +511,11 @@ func (r *ResolveCluster) ExpandWildcards(expandwildcards ...expandwildcard.Expan return r } -// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. -// Defaults to false. +// IgnoreThrottled If true, concrete, expanded, or aliased indices are ignored when frozen. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: ignore_throttled func (r *ResolveCluster) IgnoreThrottled(ignorethrottled bool) *ResolveCluster { r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) @@ -348,7 +524,11 @@ func (r *ResolveCluster) IgnoreThrottled(ignorethrottled bool) *ResolveCluster { } // IgnoreUnavailable If false, the request returns an error if it targets a missing or closed -// index. Defaults to false. +// index. 
+// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: ignore_unavailable func (r *ResolveCluster) IgnoreUnavailable(ignoreunavailable bool) *ResolveCluster { r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) @@ -356,6 +536,23 @@ func (r *ResolveCluster) IgnoreUnavailable(ignoreunavailable bool) *ResolveClust return r } +// Timeout The maximum time to wait for remote clusters to respond. +// If a remote cluster does not respond within this timeout period, the API +// response +// will show the cluster as not connected and include an error message that the +// request timed out. +// +// The default timeout is unset and the query can take +// as long as the networking layer is configured to wait for remote clusters +// that are +// not responding (typically 30 seconds). +// API name: timeout +func (r *ResolveCluster) Timeout(duration string) *ResolveCluster { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/resolvecluster/response.go b/typedapi/indices/resolvecluster/response.go index 0a9caf4ebe..56fe743a7c 100644 --- a/typedapi/indices/resolvecluster/response.go +++ b/typedapi/indices/resolvecluster/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resolvecluster @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resolvecluster // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27 type Response map[string]types.ResolveClusterInfo diff --git a/typedapi/indices/resolveindex/resolve_index.go b/typedapi/indices/resolveindex/resolve_index.go index 5ec8fe6d4a..0906a44edf 100644 --- a/typedapi/indices/resolveindex/resolve_index.go +++ b/typedapi/indices/resolveindex/resolve_index.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Resolves the specified name(s) and/or index patterns for indices, aliases, -// and data streams. +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. // Multiple patterns and remote clusters are supported. package resolveindex @@ -79,11 +80,12 @@ func NewResolveIndexFunc(tp elastictransport.Interface) NewResolveIndex { } } -// Resolves the specified name(s) and/or index patterns for indices, aliases, -// and data streams. +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. // Multiple patterns and remote clusters are supported. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index func New(tp elastictransport.Interface) *ResolveIndex { r := &ResolveIndex{ transport: tp, diff --git a/typedapi/indices/resolveindex/response.go b/typedapi/indices/resolveindex/response.go index 61016d9f23..08283e9bb9 100644 --- a/typedapi/indices/resolveindex/response.go +++ b/typedapi/indices/resolveindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resolveindex @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resolveindex // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 type Response struct { Aliases []types.ResolveIndexAliasItem `json:"aliases"` DataStreams []types.ResolveIndexDataStreamsItem `json:"data_streams"` diff --git a/typedapi/indices/rollover/request.go b/typedapi/indices/rollover/request.go index 3bf113d649..85431847f7 100644 --- a/typedapi/indices/rollover/request.go +++ b/typedapi/indices/rollover/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rollover @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L147 type Request struct { // Aliases Aliases for the target index. diff --git a/typedapi/indices/rollover/response.go b/typedapi/indices/rollover/response.go index 3a8e786489..a3c1b862dd 100644 --- a/typedapi/indices/rollover/response.go +++ b/typedapi/indices/rollover/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rollover // Response holds the response body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 type Response struct { Acknowledged bool `json:"acknowledged"` Conditions map[string]bool `json:"conditions"` diff --git a/typedapi/indices/rollover/rollover.go b/typedapi/indices/rollover/rollover.go index e493bb4bc9..2dea9c183b 100644 --- a/typedapi/indices/rollover/rollover.go +++ b/typedapi/indices/rollover/rollover.go @@ -16,10 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Roll over to a new index. -// Creates a new index for a data stream or index alias. +// TIP: It is recommended to use the index lifecycle rollover action to automate +// rollovers. +// +// The rollover API creates a new index for a data stream or index alias. +// The API behavior depends on the rollover target. +// +// **Roll over a data stream** +// +// If you roll over a data stream, the API creates a new write index for the +// stream. +// The stream's previous write index becomes a regular backing index. +// A rollover also increments the data stream's generation. 
+//
+// **Roll over an index alias with a write index**
+//
+// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
+// write index to manage time series data.
+// Data streams replace this functionality, require less maintenance, and
+// automatically integrate with data tiers.
+//
+// If an index alias points to multiple indices, one of the indices must be a
+// write index.
+// The rollover API creates a new write index for the alias with
+// `is_write_index` set to `true`.
+// The API also sets `is_write_index` to `false` for the previous write index.
+//
+// **Roll over an index alias with one index**
+//
+// If you roll over an index alias that points to only one index, the API
+// creates a new index for the alias and removes the original index from the
+// alias.
+//
+// NOTE: A rollover creates a new index and is subject to the
+// `wait_for_active_shards` setting.
+//
+// **Increment index names for an alias**
+//
+// When you roll over an index alias, you can specify a name for the new index.
+// If you don't specify a name and the current index ends with `-` and a number,
+// such as `my-index-000001` or `my-index-3`, the new index name increments that
+// number.
+// For example, if you roll over an alias with a current index of
+// `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+// This number is always six characters and zero-padded, regardless of the
+// previous index's name.
+//
+// If you use an index alias for time series data, you can use date math in the
+// index name to track the rollover date.
+// For example, you can create an alias that points to an index named
+// ``.
+// If you create the index on May 6, 2099, the index's name is
+// `my-index-2099.05.06-000001`.
+// If you roll over the alias on May 7, 2099, the new index's name is
+// `my-index-2099.05.07-000002`.
package rollover import ( @@ -86,9 +139,62 @@ func NewRolloverFunc(tp elastictransport.Interface) NewRollover { } // Roll over to a new index. -// Creates a new index for a data stream or index alias. +// TIP: It is recommended to use the index lifecycle rollover action to automate +// rollovers. +// +// The rollover API creates a new index for a data stream or index alias. +// The API behavior depends on the rollover target. +// +// **Roll over a data stream** +// +// If you roll over a data stream, the API creates a new write index for the +// stream. +// The stream's previous write index becomes a regular backing index. +// A rollover also increments the data stream's generation. +// +// **Roll over an index alias with a write index** +// +// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a +// write index to manage time series data. +// Data streams replace this functionality, require less maintenance, and +// automatically integrate with data tiers. +// +// If an index alias points to multiple indices, one of the indices must be a +// write index. +// The rollover API creates a new write index for the alias with +// `is_write_index` set to `true`. +// The API also `sets is_write_index` to `false` for the previous write index. +// +// **Roll over an index alias with one index** +// +// If you roll over an index alias that points to only one index, the API +// creates a new index for the alias and removes the original index from the +// alias. +// +// NOTE: A rollover creates a new index and is subject to the +// `wait_for_active_shards` setting. +// +// **Increment index names for an alias** +// +// When you roll over an index alias, you can specify a name for the new index. +// If you don't specify a name and the current index ends with `-` and a number, +// such as `my-index-000001` or `my-index-3`, the new index name increments that +// number. 
+// For example, if you roll over an alias with a current index of +// `my-index-000001`, the rollover creates a new index named `my-index-000002`. +// This number is always six characters and zero-padded, regardless of the +// previous index's name. +// +// If you use an index alias for time series data, you can use date math in the +// index name to track the rollover date. +// For example, you can create an alias that points to an index named +// ``. +// If you create the index on May 6, 2099, the index's name is +// `my-index-2099.05.06-000001`. +// If you roll over the alias on May 7, 2099, the new index's name is +// `my-index-2099.05.07-000002`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover func New(tp elastictransport.Interface) *Rollover { r := &Rollover{ transport: tp, @@ -96,8 +202,6 @@ func New(tp elastictransport.Interface) *Rollover { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -430,17 +534,38 @@ func (r *Rollover) Pretty(pretty bool) *Rollover { return r } -// Aliases Aliases for the target index. +// Aliases for the target index. // Data streams do not support this parameter. 
// API name: aliases func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Rollover) AddAlias(key string, value types.AliasVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Conditions Conditions for the rollover. +// Conditions for the rollover. // If specified, Elasticsearch only performs the rollover if the current index // satisfies these conditions. // If this parameter is not specified, Elasticsearch performs the rollover @@ -450,30 +575,59 @@ func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover { // The index will rollover if any `max_*` condition is satisfied and all `min_*` // conditions are satisfied. // API name: conditions -func (r *Rollover) Conditions(conditions *types.RolloverConditions) *Rollover { +func (r *Rollover) Conditions(conditions types.RolloverConditionsVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Conditions = conditions + r.req.Conditions = conditions.RolloverConditionsCaster() return r } -// Mappings Mapping for fields in the index. +// Mapping for fields in the index. // If specified, this mapping can include field names, field data types, and // mapping paramaters. 
// API name: mappings -func (r *Rollover) Mappings(mappings *types.TypeMapping) *Rollover { +func (r *Rollover) Mappings(mappings types.TypeMappingVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Settings Configuration options for the index. +// Configuration options for the index. // Data streams do not support this parameter. // API name: settings func (r *Rollover) Settings(settings map[string]json.RawMessage) *Rollover { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Rollover) AddSetting(key string, value json.RawMessage) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/segments/response.go b/typedapi/indices/segments/response.go index 8096058d7d..3375efd111 100644 --- a/typedapi/indices/segments/response.go +++ b/typedapi/indices/segments/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 type Response struct { Indices map[string]types.IndexSegment `json:"indices"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/segments/segments.go b/typedapi/indices/segments/segments.go index f7749abb2f..0eb0dd6278 100644 --- a/typedapi/indices/segments/segments.go +++ b/typedapi/indices/segments/segments.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns low-level information about the Lucene segments in index shards. -// For data streams, the API returns information about the stream’s backing +// Get index segments. +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing // indices. package segments @@ -77,11 +78,12 @@ func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { } } -// Returns low-level information about the Lucene segments in index shards. -// For data streams, the API returns information about the stream’s backing +// Get index segments. 
+// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing // indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments func New(tp elastictransport.Interface) *Segments { r := &Segments{ transport: tp, diff --git a/typedapi/indices/shardstores/response.go b/typedapi/indices/shardstores/response.go index 5dba49ed31..4d7ed6e5d9 100644 --- a/typedapi/indices/shardstores/response.go +++ b/typedapi/indices/shardstores/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package shardstores @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shardstores // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 type Response struct { Indices map[string]types.IndicesShardStores `json:"indices"` } diff --git a/typedapi/indices/shardstores/shard_stores.go b/typedapi/indices/shardstores/shard_stores.go index 5781ea2f1e..573b172db3 100644 --- a/typedapi/indices/shardstores/shard_stores.go +++ b/typedapi/indices/shardstores/shard_stores.go @@ -16,11 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves store information about replica shards in one or more indices. -// For data streams, the API retrieves store information for the stream’s +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's // backing indices. +// +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. +// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. package shardstores import ( @@ -78,11 +90,23 @@ func NewShardStoresFunc(tp elastictransport.Interface) NewShardStores { } } -// Retrieves store information about replica shards in one or more indices. -// For data streams, the API retrieves store information for the stream’s +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's // backing indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. 
+// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores func New(tp elastictransport.Interface) *ShardStores { r := &ShardStores{ transport: tp, diff --git a/typedapi/indices/shrink/request.go b/typedapi/indices/shrink/request.go index 7e1ded8b4f..bf27a7d848 100644 --- a/typedapi/indices/shrink/request.go +++ b/typedapi/indices/shrink/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package shrink @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L113 type Request struct { // Aliases The key is the alias name. diff --git a/typedapi/indices/shrink/response.go b/typedapi/indices/shrink/response.go index 8b5482183b..2e661bef04 100644 --- a/typedapi/indices/shrink/response.go +++ b/typedapi/indices/shrink/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package shrink // Response holds the response body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/shrink/shrink.go b/typedapi/indices/shrink/shrink.go index 137ae99c1f..928b6f0ac4 100644 --- a/typedapi/indices/shrink/shrink.go +++ b/typedapi/indices/shrink/shrink.go @@ -16,9 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Shrinks an existing index into a new index with fewer primary shards. +// Shrink an index. +// Shrink an index into a new index with fewer primary shards. +// +// Before you can shrink an index: +// +// * The index must be read-only. +// * A copy of every shard in the index must reside on the same node. +// * The index must have a green health status. +// +// To make shard allocation easier, we recommend you also remove the index's +// replica shards. +// You can later re-add replica shards as part of the shrink operation. +// +// The requested number of primary shards in the target index must be a factor +// of the number of shards in the source index. 
+// For example an index with 8 primary shards can be shrunk into 4, 2 or 1 +// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or +// 1. +// If the number of shards in the index is a prime number it can only be shrunk +// into a single primary shard +// +// Before shrinking, a (primary or replica) copy of every shard in the index +// +// must be present on the same node. +// +// The current write index on a data stream cannot be shrunk. In order to shrink +// the current write index, the data stream must first be rolled over so that a +// new write index is created and then the previous write index can be shrunk. +// +// A shrink operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a smaller number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system does not support hard-linking, then all segments are copied into +// the new index, which is a much more time consuming process. Also if using +// multiple data paths, shards on different data paths require a full copy of +// segment files if they are not on the same disk since hardlinks do not work +// across disks. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. Recovers shards to the +// `.routing.allocation.initial_recovery._id` index setting. +// +// IMPORTANT: Indices can only be shrunk if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have more primary shards than the target index. +// * The number of primary shards in the target index must be a factor of the +// number of primary shards in the source index. The source index must have more +// primary shards than the target index. 
+// * The index must not contain more than 2,147,483,519 documents in total +// across all shards that will be shrunk into a single shard on the target index +// as this is the maximum number of docs that can fit into a single shard. +// * The node handling the shrink process must have sufficient free disk space +// to accommodate a second copy of the existing index. package shrink import ( @@ -86,9 +141,64 @@ func NewShrinkFunc(tp elastictransport.Interface) NewShrink { } } -// Shrinks an existing index into a new index with fewer primary shards. +// Shrink an index. +// Shrink an index into a new index with fewer primary shards. +// +// Before you can shrink an index: +// +// * The index must be read-only. +// * A copy of every shard in the index must reside on the same node. +// * The index must have a green health status. +// +// To make shard allocation easier, we recommend you also remove the index's +// replica shards. +// You can later re-add replica shards as part of the shrink operation. +// +// The requested number of primary shards in the target index must be a factor +// of the number of shards in the source index. +// For example an index with 8 primary shards can be shrunk into 4, 2 or 1 +// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or +// 1. +// If the number of shards in the index is a prime number it can only be shrunk +// into a single primary shard +// +// Before shrinking, a (primary or replica) copy of every shard in the index +// +// must be present on the same node. +// +// The current write index on a data stream cannot be shrunk. In order to shrink +// the current write index, the data stream must first be rolled over so that a +// new write index is created and then the previous write index can be shrunk. +// +// A shrink operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a smaller number of primary shards. 
+// * Hard-links segments from the source index into the target index. If the +// file system does not support hard-linking, then all segments are copied into +// the new index, which is a much more time consuming process. Also if using +// multiple data paths, shards on different data paths require a full copy of +// segment files if they are not on the same disk since hardlinks do not work +// across disks. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. Recovers shards to the +// `.routing.allocation.initial_recovery._id` index setting. +// +// IMPORTANT: Indices can only be shrunk if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have more primary shards than the target index. +// * The number of primary shards in the target index must be a factor of the +// number of primary shards in the source index. The source index must have more +// primary shards than the target index. +// * The index must not contain more than 2,147,483,519 documents in total +// across all shards that will be shrunk into a single shard on the target index +// as this is the maximum number of docs that can fit into a single shard. +// * The node handling the shrink process must have sufficient free disk space +// to accommodate a second copy of the existing index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink func New(tp elastictransport.Interface) *Shrink { r := &Shrink{ transport: tp, @@ -96,8 +206,6 @@ func New(tp elastictransport.Interface) *Shrink { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,21 +516,63 @@ func (r *Shrink) Pretty(pretty bool) *Shrink { return r } -// Aliases The key is the alias name. 
+// The key is the alias name. // Index alias names support date math. // API name: aliases func (r *Shrink) Aliases(aliases map[string]types.Alias) *Shrink { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Shrink) AddAlias(key string, value types.AliasVariant) *Shrink { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index. // API name: settings func (r *Shrink) Settings(settings map[string]json.RawMessage) *Shrink { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Shrink) AddSetting(key string, value json.RawMessage) *Shrink { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/simulateindextemplate/response.go b/typedapi/indices/simulateindextemplate/response.go index 9aa3aa3106..1d340751a7 100644 --- a/typedapi/indices/simulateindextemplate/response.go +++ b/typedapi/indices/simulateindextemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package simulateindextemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulateindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30 type Response struct { Overlapping []types.Overlapping `json:"overlapping,omitempty"` Template types.Template `json:"template"` diff --git a/typedapi/indices/simulateindextemplate/simulate_index_template.go b/typedapi/indices/simulateindextemplate/simulate_index_template.go index 6d7cc05088..4be2df91f3 100644 --- a/typedapi/indices/simulateindextemplate/simulate_index_template.go +++ b/typedapi/indices/simulateindextemplate/simulate_index_template.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Simulate an index. -// Returns the index configuration that would be applied to the specified index -// from an existing index template. +// Get the index configuration that would be applied to the specified index from +// an existing index template. package simulateindextemplate import ( @@ -79,10 +79,10 @@ func NewSimulateIndexTemplateFunc(tp elastictransport.Interface) NewSimulateInde } // Simulate an index. 
-// Returns the index configuration that would be applied to the specified index -// from an existing index template. +// Get the index configuration that would be applied to the specified index from +// an existing index template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-index.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template func New(tp elastictransport.Interface) *SimulateIndexTemplate { r := &SimulateIndexTemplate{ transport: tp, diff --git a/typedapi/indices/simulatetemplate/request.go b/typedapi/indices/simulatetemplate/request.go index 427cd976a2..4aca66f40c 100644 --- a/typedapi/indices/simulatetemplate/request.go +++ b/typedapi/indices/simulatetemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package simulatetemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package simulatetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L120 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L131 type Request struct { // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster diff --git a/typedapi/indices/simulatetemplate/response.go b/typedapi/indices/simulatetemplate/response.go index e31a71fa61..6e35ca6408 100644 --- a/typedapi/indices/simulatetemplate/response.go +++ b/typedapi/indices/simulatetemplate/response.go @@ -16,7 +16,7 @@ // 
under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package simulatetemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulatetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 type Response struct { Overlapping []types.Overlapping `json:"overlapping,omitempty"` Template types.Template `json:"template"` diff --git a/typedapi/indices/simulatetemplate/simulate_template.go b/typedapi/indices/simulatetemplate/simulate_template.go index 8313b2d713..45edded5d6 100644 --- a/typedapi/indices/simulatetemplate/simulate_template.go +++ b/typedapi/indices/simulatetemplate/simulate_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Simulate an index template. -// Returns the index configuration that would be applied by a particular index +// Get the index configuration that would be applied by a particular index // template. package simulatetemplate @@ -82,10 +82,10 @@ func NewSimulateTemplateFunc(tp elastictransport.Interface) NewSimulateTemplate } // Simulate an index template. 
-// Returns the index configuration that would be applied by a particular index +// Get the index configuration that would be applied by a particular index // template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-template.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template func New(tp elastictransport.Interface) *SimulateTemplate { r := &SimulateTemplate{ transport: tp, @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *SimulateTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -398,7 +396,7 @@ func (r *SimulateTemplate) Pretty(pretty bool) *SimulateTemplate { return r } -// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// This setting overrides the value of the `action.auto_create_index` cluster // setting. // If set to `true` in a template, then indices can be automatically created // using that template even if auto-creation of indices is disabled via @@ -407,74 +405,112 @@ func (r *SimulateTemplate) Pretty(pretty bool) *SimulateTemplate { // always be explicitly created, and may never be automatically created. // API name: allow_auto_create func (r *SimulateTemplate) AllowAutoCreate(allowautocreate bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowAutoCreate = &allowautocreate return r } -// ComposedOf An ordered list of component template names. +// An ordered list of component template names. // Component templates are merged in the order specified, meaning that the last // component template specified has the highest precedence. 
// API name: composed_of func (r *SimulateTemplate) ComposedOf(composedofs ...string) *SimulateTemplate { - r.req.ComposedOf = composedofs + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + } return r } -// DataStream If this object is included, the template is used to create data streams and +// If this object is included, the template is used to create data streams and // their backing indices. // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. // API name: data_stream -func (r *SimulateTemplate) DataStream(datastream *types.DataStreamVisibility) *SimulateTemplate { +func (r *SimulateTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataStream = datastream + r.req.DataStream = datastream.DataStreamVisibilityCaster() return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. 
// API name: deprecated func (r *SimulateTemplate) Deprecated(deprecated bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when +// The configuration option ignore_missing_component_templates can be used when // an index template // references a component template that might not exist // API name: ignore_missing_component_templates func (r *SimulateTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *SimulateTemplate { - r.req.IgnoreMissingComponentTemplates = ignoremissingcomponenttemplates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + + } return r } -// IndexPatterns Array of wildcard (`*`) expressions used to match the names of data streams +// Array of wildcard (`*`) expressions used to match the names of data streams // and indices during creation. // API name: index_patterns func (r *SimulateTemplate) IndexPatterns(indices ...string) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = indices return r } -// Meta_ Optional user metadata about the index template. +// Optional user metadata about the index template. // May have any contents. // This map is not automatically generated by Elasticsearch. 
// API name: _meta -func (r *SimulateTemplate) Meta_(metadata types.Metadata) *SimulateTemplate { - r.req.Meta_ = metadata +func (r *SimulateTemplate) Meta_(metadata types.MetadataVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Priority Priority to determine index template precedence when a new data stream or +// Priority to determine index template precedence when a new data stream or // index is created. // The index template with the highest priority is chosen. // If no priority is specified the template is treated as though it is of @@ -482,27 +518,40 @@ func (r *SimulateTemplate) Meta_(metadata types.Metadata) *SimulateTemplate { // This number is not automatically generated by Elasticsearch. // API name: priority func (r *SimulateTemplate) Priority(priority int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Priority = &priority return r } -// Template Template to be applied. +// Template to be applied. // It may optionally include an `aliases`, `mappings`, or `settings` // configuration. // API name: template -func (r *SimulateTemplate) Template(template *types.IndexTemplateMapping) *SimulateTemplate { +func (r *SimulateTemplate) Template(template types.IndexTemplateMappingVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = template + r.req.Template = template.IndexTemplateMappingCaster() return r } -// Version Version number used to manage index templates externally. +// Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. 
// API name: version func (r *SimulateTemplate) Version(versionnumber int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/split/request.go b/typedapi/indices/split/request.go index 00e88b66fb..b6c463dd5c 100644 --- a/typedapi/indices/split/request.go +++ b/typedapi/indices/split/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package split @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/split/IndicesSplitRequest.ts#L27-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/split/IndicesSplitRequest.ts#L27-L113 type Request struct { // Aliases Aliases for the resulting index. diff --git a/typedapi/indices/split/response.go b/typedapi/indices/split/response.go index e42bba3c2b..1078e377f4 100644 --- a/typedapi/indices/split/response.go +++ b/typedapi/indices/split/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package split // Response holds the response body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/split/IndicesSplitResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/split/IndicesSplitResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/split/split.go b/typedapi/indices/split/split.go index f637470376..040f354221 100644 --- a/typedapi/indices/split/split.go +++ b/typedapi/indices/split/split.go @@ -16,9 +16,56 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Splits an existing index into a new index with more primary shards. +// Split an index. +// Split an index into a new index with more primary shards. +// * Before you can split an index: +// +// * The index must be read-only. +// * The cluster health status must be green. +// +// You can do make an index read-only with the following request using the add +// index block API: +// +// ``` +// PUT /my_source_index/_block/write +// ``` +// +// The current write index on a data stream cannot be split. +// In order to split the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be split. 
+// +// The number of times the index can be split (and the number of shards that +// each original shard can be split into) is determined by the +// `index.number_of_routing_shards` setting. +// The number of routing shards specifies the hashing space that is used +// internally to distribute documents across shards with consistent hashing. +// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x +// 2 x 3) could be split by a factor of 2 or 3. +// +// A split operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a larger number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system doesn't support hard-linking, all segments are copied into the +// new index, which is a much more time consuming process. +// * Hashes all documents again, after low level files are created, to delete +// documents that belong to a different shard. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. +// +// IMPORTANT: Indices can only be split if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have fewer primary shards than the target index. +// * The number of primary shards in the target index must be a multiple of the +// number of primary shards in the source index. +// * The node handling the split process must have sufficient free disk space to +// accommodate a second copy of the existing index. package split import ( @@ -86,9 +133,56 @@ func NewSplitFunc(tp elastictransport.Interface) NewSplit { } } -// Splits an existing index into a new index with more primary shards. +// Split an index. +// Split an index into a new index with more primary shards. +// * Before you can split an index: +// +// * The index must be read-only. +// * The cluster health status must be green. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html +// You can do make an index read-only with the following request using the add +// index block API: +// +// ``` +// PUT /my_source_index/_block/write +// ``` +// +// The current write index on a data stream cannot be split. +// In order to split the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be split. +// +// The number of times the index can be split (and the number of shards that +// each original shard can be split into) is determined by the +// `index.number_of_routing_shards` setting. +// The number of routing shards specifies the hashing space that is used +// internally to distribute documents across shards with consistent hashing. +// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x +// 2 x 3) could be split by a factor of 2 or 3. +// +// A split operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a larger number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system doesn't support hard-linking, all segments are copied into the +// new index, which is a much more time consuming process. +// * Hashes all documents again, after low level files are created, to delete +// documents that belong to a different shard. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. +// +// IMPORTANT: Indices can only be split if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have fewer primary shards than the target index. +// * The number of primary shards in the target index must be a multiple of the +// number of primary shards in the source index. 
+// * The node handling the split process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split func New(tp elastictransport.Interface) *Split { r := &Split{ transport: tp, @@ -96,8 +190,6 @@ func New(tp elastictransport.Interface) *Split { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,20 +500,62 @@ func (r *Split) Pretty(pretty bool) *Split { return r } -// Aliases Aliases for the resulting index. +// Aliases for the resulting index. // API name: aliases func (r *Split) Aliases(aliases map[string]types.Alias) *Split { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Split) AddAlias(key string, value types.AliasVariant) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index. 
// API name: settings func (r *Split) Settings(settings map[string]json.RawMessage) *Split { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Split) AddSetting(key string, value json.RawMessage) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/stats/response.go b/typedapi/indices/stats/response.go index 9495c9a97b..e75d4c9c25 100644 --- a/typedapi/indices/stats/response.go +++ b/typedapi/indices/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 type Response struct { All_ types.IndicesStats `json:"_all"` Indices map[string]types.IndicesStats `json:"indices,omitempty"` diff --git a/typedapi/indices/stats/stats.go b/typedapi/indices/stats/stats.go index 2c0095660a..0bca535fe9 100644 --- a/typedapi/indices/stats/stats.go +++ b/typedapi/indices/stats/stats.go @@ -16,11 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns statistics for one or more indices. -// For data streams, the API retrieves statistics for the stream’s backing +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing // indices. +// +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. package stats import ( @@ -81,11 +93,23 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns statistics for one or more indices. -// For data streams, the API retrieves statistics for the stream’s backing +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing // indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. 
+// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, diff --git a/typedapi/indices/updatealiases/request.go b/typedapi/indices/updatealiases/request.go index 3bc4fdc438..b043f2b3b9 100644 --- a/typedapi/indices/updatealiases/request.go +++ b/typedapi/indices/updatealiases/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatealiases @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L59 type Request struct { // Actions Actions to perform. diff --git a/typedapi/indices/updatealiases/response.go b/typedapi/indices/updatealiases/response.go index cb10533715..f1d6553ee2 100644 --- a/typedapi/indices/updatealiases/response.go +++ b/typedapi/indices/updatealiases/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatealiases // Response holds the response body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/updatealiases/update_aliases.go b/typedapi/indices/updatealiases/update_aliases.go index c91cec0eb1..018adc2d8e 100644 --- a/typedapi/indices/updatealiases/update_aliases.go +++ b/typedapi/indices/updatealiases/update_aliases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update an alias. // Adds a data stream or index to an alias. @@ -77,7 +77,7 @@ func NewUpdateAliasesFunc(tp elastictransport.Interface) NewUpdateAliases { // Create or update an alias. // Adds a data stream or index to an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases func New(tp elastictransport.Interface) *UpdateAliases { r := &UpdateAliases{ transport: tp, @@ -85,8 +85,6 @@ func New(tp elastictransport.Interface) *UpdateAliases { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -356,10 +354,17 @@ func (r *UpdateAliases) Pretty(pretty bool) *UpdateAliases { return r } -// Actions Actions to perform. +// Actions to perform. // API name: actions -func (r *UpdateAliases) Actions(actions ...types.IndicesAction) *UpdateAliases { - r.req.Actions = actions +func (r *UpdateAliases) Actions(actions ...types.IndicesActionVariant) *UpdateAliases { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + r.req.Actions = append(r.req.Actions, *v.IndicesActionCaster()) + + } return r } diff --git a/typedapi/indices/validatequery/request.go b/typedapi/indices/validatequery/request.go index cf36ae8a12..ff0c6c63f5 100644 --- a/typedapi/indices/validatequery/request.go +++ b/typedapi/indices/validatequery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validatequery @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L112 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L123 type Request struct { // Query Query in the Lucene query string syntax. diff --git a/typedapi/indices/validatequery/response.go b/typedapi/indices/validatequery/response.go index e71047ebb4..8b34c209aa 100644 --- a/typedapi/indices/validatequery/response.go +++ b/typedapi/indices/validatequery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validatequery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 type Response struct { Error *string `json:"error,omitempty"` Explanations []types.IndicesValidationExplanation `json:"explanations,omitempty"` diff --git a/typedapi/indices/validatequery/validate_query.go b/typedapi/indices/validatequery/validate_query.go index 323924576a..0bb412de74 100644 --- a/typedapi/indices/validatequery/validate_query.go +++ b/typedapi/indices/validatequery/validate_query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Validate a query. // Validates a query without running it. @@ -85,7 +85,7 @@ func NewValidateQueryFunc(tp elastictransport.Interface) NewValidateQuery { // Validate a query. // Validates a query without running it. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query func New(tp elastictransport.Interface) *ValidateQuery { r := &ValidateQuery{ transport: tp, @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *ValidateQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -486,11 +484,15 @@ func (r *ValidateQuery) Pretty(pretty bool) *ValidateQuery { return r } -// Query Query in the Lucene query string syntax. +// Query in the Lucene query string syntax. // API name: query -func (r *ValidateQuery) Query(query *types.Query) *ValidateQuery { +func (r *ValidateQuery) Query(query types.QueryVariant) *ValidateQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/inference/chatcompletionunified/chat_completion_unified.go b/typedapi/inference/chatcompletionunified/chat_completion_unified.go new file mode 100644 index 0000000000..9a76d4c4ef --- /dev/null +++ b/typedapi/inference/chatcompletionunified/chat_completion_unified.go @@ -0,0 +1,480 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Perform chat completion inference +package chatcompletionunified + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ChatCompletionUnified struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewChatCompletionUnified type alias for index. +type NewChatCompletionUnified func(inferenceid string) *ChatCompletionUnified + +// NewChatCompletionUnifiedFunc returns a new instance of ChatCompletionUnified with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewChatCompletionUnifiedFunc(tp elastictransport.Interface) NewChatCompletionUnified { + return func(inferenceid string) *ChatCompletionUnified { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform chat completion inference +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference +func New(tp elastictransport.Interface) *ChatCompletionUnified { + r := &ChatCompletionUnified{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ChatCompletionUnified) Raw(raw io.Reader) *ChatCompletionUnified { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ChatCompletionUnified) Request(req *Request) *ChatCompletionUnified { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ChatCompletionUnified) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ChatCompletionUnified: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ChatCompletionUnified) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ChatCompletionUnified query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a chatcompletionunified.Response +func (r ChatCompletionUnified) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ChatCompletionUnified headers map. +func (r *ChatCompletionUnified) Header(key, value string) *ChatCompletionUnified { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *ChatCompletionUnified) _inferenceid(inferenceid string) *ChatCompletionUnified { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *ChatCompletionUnified) Timeout(duration string) *ChatCompletionUnified { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ChatCompletionUnified) ErrorTrace(errortrace bool) *ChatCompletionUnified { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ChatCompletionUnified) FilterPath(filterpaths ...string) *ChatCompletionUnified { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ChatCompletionUnified) Human(human bool) *ChatCompletionUnified { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ChatCompletionUnified) Pretty(pretty bool) *ChatCompletionUnified { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The upper bound limit for the number of tokens that can be generated for a +// completion request. +// API name: max_completion_tokens +func (r *ChatCompletionUnified) MaxCompletionTokens(maxcompletiontokens int64) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxCompletionTokens = &maxcompletiontokens + + return r +} + +// A list of objects representing the conversation. +// API name: messages +func (r *ChatCompletionUnified) Messages(messages ...types.MessageVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range messages { + + r.req.Messages = append(r.req.Messages, *v.MessageCaster()) + + } + return r +} + +// The ID of the model to use. 
+// API name: model +func (r *ChatCompletionUnified) Model(model string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Model = &model + + return r +} + +// A sequence of strings to control when the model should stop generating +// additional tokens. +// API name: stop +func (r *ChatCompletionUnified) Stop(stops ...string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stops { + + r.req.Stop = append(r.req.Stop, v) + + } + return r +} + +// The sampling temperature to use. +// API name: temperature +func (r *ChatCompletionUnified) Temperature(temperature float32) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Temperature = &temperature + + return r +} + +// Controls which tool is called by the model. +// API name: tool_choice +func (r *ChatCompletionUnified) ToolChoice(completiontooltype types.CompletionToolTypeVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ToolChoice = *completiontooltype.CompletionToolTypeCaster() + + return r +} + +// A list of tools that the model can call. +// API name: tools +func (r *ChatCompletionUnified) Tools(tools ...types.CompletionToolVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range tools { + + r.req.Tools = append(r.req.Tools, *v.CompletionToolCaster()) + + } + return r +} + +// Nucleus sampling, an alternative to sampling with temperature. 
+// API name: top_p +func (r *ChatCompletionUnified) TopP(topp float32) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TopP = &topp + + return r +} diff --git a/typedapi/inference/chatcompletionunified/request.go b/typedapi/inference/chatcompletionunified/request.go new file mode 100644 index 0000000000..15d01ab0fb --- /dev/null +++ b/typedapi/inference/chatcompletionunified/request.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package chatcompletionunified + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L26-L87 +type Request struct { + + // MaxCompletionTokens The upper bound limit for the number of tokens that can be generated for a + // completion request. + MaxCompletionTokens *int64 `json:"max_completion_tokens,omitempty"` + // Messages A list of objects representing the conversation. + Messages []types.Message `json:"messages"` + // Model The ID of the model to use. + Model *string `json:"model,omitempty"` + // Stop A sequence of strings to control when the model should stop generating + // additional tokens. + Stop []string `json:"stop,omitempty"` + // Temperature The sampling temperature to use. + Temperature *float32 `json:"temperature,omitempty"` + // ToolChoice Controls which tool is called by the model. + ToolChoice types.CompletionToolType `json:"tool_choice,omitempty"` + // Tools A list of tools that the model can call. + Tools []types.CompletionTool `json:"tools,omitempty"` + // TopP Nucleus sampling, an alternative to sampling with temperature. 
+ TopP *float32 `json:"top_p,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Chatcompletionunified request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_completion_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxCompletionTokens", err) + } + s.MaxCompletionTokens = &value + case float64: + f := int64(v) + s.MaxCompletionTokens = &f + } + + case "messages": + if err := dec.Decode(&s.Messages); err != nil { + return fmt.Errorf("%s | %w", "Messages", err) + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = &o + + case "stop": + if err := dec.Decode(&s.Stop); err != nil { + return fmt.Errorf("%s | %w", "Stop", err) + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "tool_choice": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + 
} + keyDec := json.NewDecoder(bytes.NewReader(message)) + toolchoice_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + + switch t { + + case "function", "type": + o := types.NewCompletionToolChoice() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + s.ToolChoice = o + break toolchoice_field + + } + } + if s.ToolChoice == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.ToolChoice); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + } + + case "tools": + if err := dec.Decode(&s.Tools); err != nil { + return fmt.Errorf("%s | %w", "Tools", err) + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + } + } + return nil +} diff --git a/typedapi/inference/chatcompletionunified/response.go b/typedapi/inference/chatcompletionunified/response.go new file mode 100644 index 0000000000..e44fac2330 --- /dev/null +++ b/typedapi/inference/chatcompletionunified/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package chatcompletionunified + +// Response holds the response body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/completion/completion.go b/typedapi/inference/completion/completion.go new file mode 100644 index 0000000000..5230f91e95 --- /dev/null +++ b/typedapi/inference/completion/completion.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Perform completion inference on the service +package completion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Completion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCompletion type alias for index. +type NewCompletion func(inferenceid string) *Completion + +// NewCompletionFunc returns a new instance of Completion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewCompletionFunc(tp elastictransport.Interface) NewCompletion { + return func(inferenceid string) *Completion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform completion inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *Completion { + r := &Completion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Completion) Raw(raw io.Reader) *Completion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Completion) Request(req *Request) *Completion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Completion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Completion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Completion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, "inference.completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Completion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a completion.Response +func (r Completion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + 
err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Completion headers map. +func (r *Completion) Header(key, value string) *Completion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *Completion) _inferenceid(inferenceid string) *Completion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Completion) Timeout(duration string) *Completion { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Completion) ErrorTrace(errortrace bool) *Completion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Completion) FilterPath(filterpaths ...string) *Completion { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Completion) Human(human bool) *Completion { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Completion) Pretty(pretty bool) *Completion { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. 
+// API name: input +func (r *Completion) Input(inputs ...string) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *Completion) TaskSettings(tasksettings json.RawMessage) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/inference/request.go b/typedapi/inference/completion/request.go similarity index 73% rename from typedapi/inference/inference/request.go rename to typedapi/inference/completion/request.go index 4fbb7fe2db..10bc316874 100644 --- a/typedapi/inference/inference/request.go +++ b/typedapi/inference/completion/request.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package inference +package completion import ( "bytes" @@ -26,20 +26,16 @@ import ( "errors" "fmt" "io" - "strconv" ) -// Request holds the request body struct for the package inference +// Request holds the request body struct for the package completion // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/inference/InferenceRequest.ts#L26-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/completion/CompletionRequest.ts#L25-L63 type Request struct { // Input Inference input. // Either a string or an array of strings. 
Input []string `json:"input"` - // Query Query input, required for rerank task. - // Not required for other tasks. - Query *string `json:"query,omitempty"` // TaskSettings Optional task settings TaskSettings json.RawMessage `json:"task_settings,omitempty"` } @@ -57,7 +53,7 @@ func (r *Request) FromJSON(data string) (*Request, error) { err := json.Unmarshal([]byte(data), &req) if err != nil { - return nil, fmt.Errorf("could not deserialise json into Inference request: %w", err) + return nil, fmt.Errorf("could not deserialise json into Completion request: %w", err) } return &req, nil @@ -93,18 +89,6 @@ func (s *Request) UnmarshalJSON(data []byte) error { } } - case "query": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Query", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Query = &o - case "task_settings": if err := dec.Decode(&s.TaskSettings); err != nil { return fmt.Errorf("%s | %w", "TaskSettings", err) diff --git a/typedapi/inference/completion/response.go b/typedapi/inference/completion/response.go new file mode 100644 index 0000000000..5bf7b0dcae --- /dev/null +++ b/typedapi/inference/completion/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package completion + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package completion +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/completion/CompletionResponse.ts#L22-L24 + +type Response []types.CompletionResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/delete/delete.go b/typedapi/inference/delete/delete.go index 3f35152ed7..6c959ed988 100644 --- a/typedapi/inference/delete/delete.go +++ b/typedapi/inference/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an inference endpoint package delete @@ -81,7 +81,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { // Delete an inference endpoint // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-inference-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -317,7 +317,7 @@ func (r *Delete) TaskType(tasktype string) *Delete { return r } -// InferenceId The inference Id +// InferenceId The inference identifier. 
// API Name: inferenceid func (r *Delete) _inferenceid(inferenceid string) *Delete { r.paramSet |= inferenceidMask @@ -326,8 +326,8 @@ func (r *Delete) _inferenceid(inferenceid string) *Delete { return r } -// DryRun When true, the endpoint is not deleted, and a list of ingest processors which -// reference this endpoint is returned +// DryRun When true, the endpoint is not deleted and a list of ingest processors which +// reference this endpoint is returned. // API name: dry_run func (r *Delete) DryRun(dryrun bool) *Delete { r.values.Set("dry_run", strconv.FormatBool(dryrun)) @@ -336,7 +336,7 @@ func (r *Delete) DryRun(dryrun bool) *Delete { } // Force When true, the inference endpoint is forcefully deleted even if it is still -// being used by ingest processors or semantic text fields +// being used by ingest processors or semantic text fields. // API name: force func (r *Delete) Force(force bool) *Delete { r.values.Set("force", strconv.FormatBool(force)) diff --git a/typedapi/inference/delete/response.go b/typedapi/inference/delete/response.go index c5081449e1..ba1a0cf014 100644 --- a/typedapi/inference/delete/response.go +++ b/typedapi/inference/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/delete/DeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/delete/DeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/inference/get/get.go b/typedapi/inference/get/get.go index 50a226c3bb..574e5d2665 100644 --- a/typedapi/inference/get/get.go +++ b/typedapi/inference/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get an inference endpoint package get @@ -79,7 +79,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { // Get an inference endpoint // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/inference/get/response.go b/typedapi/inference/get/response.go index a574707286..c0241e0166 100644 --- a/typedapi/inference/get/response.go +++ b/typedapi/inference/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/get/GetResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/get/GetResponse.ts#L22-L26 type Response struct { Endpoints []types.InferenceEndpointInfo `json:"endpoints"` } diff --git a/typedapi/inference/put/put.go b/typedapi/inference/put/put.go index d95c3c6c88..0b12a43e2e 100644 --- a/typedapi/inference/put/put.go +++ b/typedapi/inference/put/put.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Create an inference endpoint +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create an inference endpoint. +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, +// Anthropic, Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. package put import ( @@ -84,9 +103,28 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { } } -// Create an inference endpoint +// Create an inference endpoint. +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, +// Anthropic, Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. 
+// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put func New(tp elastictransport.Interface) *Put { r := &Put{ transport: tp, @@ -94,8 +132,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -386,26 +422,53 @@ func (r *Put) Pretty(pretty bool) *Put { return r } -// Service The service type +// Chunking configuration object +// API name: chunking_settings +func (r *Put) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type // API name: service func (r *Put) Service(service string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Service = service return r } -// ServiceSettings Settings specific to the service +// Settings specific to the service // API name: service_settings func (r *Put) ServiceSettings(servicesettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ServiceSettings = servicesettings return r } -// TaskSettings Task settings specific to the service and task type +// Task settings specific to the service and task type // API name: task_settings func (r *Put) TaskSettings(tasksettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TaskSettings = tasksettings return r diff --git 
a/typedapi/inference/put/request.go b/typedapi/inference/put/request.go index eaee4fb9c9..11b8e0c7f6 100644 --- a/typedapi/inference/put/request.go +++ b/typedapi/inference/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/put/PutRequest.ts#L25-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put/PutRequest.ts#L25-L65 type Request = types.InferenceEndpoint // NewRequest returns a Request diff --git a/typedapi/inference/put/response.go b/typedapi/inference/put/response.go index 6744d793b9..d0c85320b5 100644 --- a/typedapi/inference/put/response.go +++ b/typedapi/inference/put/response.go @@ -16,21 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put import ( "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" ) // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/put/PutResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put/PutResponse.ts#L22-L24 type Response struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` // InferenceId The inference Id InferenceId string `json:"inference_id"` // Service The service type diff --git a/typedapi/inference/putopenai/put_openai.go b/typedapi/inference/putopenai/put_openai.go new file mode 100644 index 0000000000..fe5c21ba66 --- /dev/null +++ b/typedapi/inference/putopenai/put_openai.go @@ -0,0 +1,458 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +package putopenai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +const ( + tasktypeMask = iota + 1 + + openaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutOpenai struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + openaiinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutOpenai type alias for index. +type NewPutOpenai func(tasktype, openaiinferenceid string) *PutOpenai + +// NewPutOpenaiFunc returns a new instance of PutOpenai with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutOpenaiFunc(tp elastictransport.Interface) NewPutOpenai { + return func(tasktype, openaiinferenceid string) *PutOpenai { + n := New(tp) + + n._tasktype(tasktype) + + n._openaiinferenceid(openaiinferenceid) + + return n + } +} + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html +func New(tp elastictransport.Interface) *PutOpenai { + r := &PutOpenai{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutOpenai) Raw(raw io.Reader) *PutOpenai { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutOpenai) Request(req *Request) *PutOpenai { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutOpenai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutOpenai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|openaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordPathPart(ctx, "openaiinferenceid", r.openaiinferenceid) + } + path.WriteString(r.openaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutOpenai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_openai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_openai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_openai") + } + if err != 
nil { + localErr := fmt.Errorf("an error happened during the PutOpenai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putopenai.Response +func (r PutOpenai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutOpenai headers map. +func (r *PutOpenai) Header(key, value string) *PutOpenai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. 
+// NOTE: The `chat_completion` task type only supports streaming and only +// through the _stream API. +// API Name: tasktype +func (r *PutOpenai) _tasktype(tasktype string) *PutOpenai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// OpenaiInferenceId The unique identifier of the inference endpoint. +// API Name: openaiinferenceid +func (r *PutOpenai) _openaiinferenceid(openaiinferenceid string) *PutOpenai { + r.paramSet |= openaiinferenceidMask + r.openaiinferenceid = openaiinferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutOpenai) ErrorTrace(errortrace bool) *PutOpenai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutOpenai) FilterPath(filterpaths ...string) *PutOpenai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutOpenai) Human(human bool) *PutOpenai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutOpenai) Pretty(pretty bool) *PutOpenai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. 
+// API name: chunking_settings +func (r *PutOpenai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `openai`. +// API name: service +func (r *PutOpenai) Service(service servicetype.ServiceType) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `openai` service. +// API name: service_settings +func (r *PutOpenai) ServiceSettings(servicesettings types.OpenAIServiceSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.OpenAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. +// API name: task_settings +func (r *PutOpenai) TaskSettings(tasksettings types.OpenAITaskSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.OpenAITaskSettingsCaster() + + return r +} diff --git a/typedapi/inference/putopenai/request.go b/typedapi/inference/putopenai/request.go new file mode 100644 index 0000000000..3f74611ad1 --- /dev/null +++ b/typedapi/inference/putopenai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putopenai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +// Request holds the request body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_openai/PutOpenAiRequest.ts#L28-L82 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `openai`. + Service servicetype.ServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `openai` service. + ServiceSettings types.OpenAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.OpenAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putopenai request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/inference/putopenai/response.go b/typedapi/inference/putopenai/response.go new file mode 100644 index 0000000000..c89501a24d --- /dev/null +++ b/typedapi/inference/putopenai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putopenai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_openai/PutOpenAiResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/putwatsonx/put_watsonx.go b/typedapi/inference/putwatsonx/put_watsonx.go new file mode 100644 index 0000000000..cf64401d74 --- /dev/null +++ b/typedapi/inference/putwatsonx/put_watsonx.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+package putwatsonx + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +const ( + tasktypeMask = iota + 1 + + watsonxinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutWatsonx struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + watsonxinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutWatsonx type alias for index. +type NewPutWatsonx func(tasktype, watsonxinferenceid string) *PutWatsonx + +// NewPutWatsonxFunc returns a new instance of PutWatsonx with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutWatsonxFunc(tp elastictransport.Interface) NewPutWatsonx { + return func(tasktype, watsonxinferenceid string) *PutWatsonx { + n := New(tp) + + n._tasktype(tasktype) + + n._watsonxinferenceid(watsonxinferenceid) + + return n + } +} + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. 
+// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx +func New(tp elastictransport.Interface) *PutWatsonx { + r := &PutWatsonx{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutWatsonx) Raw(raw io.Reader) *PutWatsonx { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutWatsonx) Request(req *Request) *PutWatsonx { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutWatsonx) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutWatsonx: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|watsonxinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watsonxinferenceid", r.watsonxinferenceid) + } + path.WriteString(r.watsonxinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns 
an http.Response. +func (r PutWatsonx) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_watsonx") + if reader := instrument.RecordRequestBody(ctx, "inference.put_watsonx", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_watsonx") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutWatsonx query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putwatsonx.Response +func (r PutWatsonx) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if 
res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutWatsonx headers map. +func (r *PutWatsonx) Header(key, value string) *PutWatsonx { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type. +// The only valid task type for the model to perform is `text_embedding`. +// API Name: tasktype +func (r *PutWatsonx) _tasktype(tasktype string) *PutWatsonx { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// WatsonxInferenceId The unique identifier of the inference endpoint. +// API Name: watsonxinferenceid +func (r *PutWatsonx) _watsonxinferenceid(watsonxinferenceid string) *PutWatsonx { + r.paramSet |= watsonxinferenceidMask + r.watsonxinferenceid = watsonxinferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutWatsonx) ErrorTrace(errortrace bool) *PutWatsonx { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *PutWatsonx) FilterPath(filterpaths ...string) *PutWatsonx { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutWatsonx) Human(human bool) *PutWatsonx { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutWatsonx) Pretty(pretty bool) *PutWatsonx { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The type of service supported for the specified task type. In this case, +// `watsonxai`. +// API name: service +func (r *PutWatsonx) Service(service servicetype.ServiceType) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `watsonxai` service. 
+// API name: service_settings +func (r *PutWatsonx) ServiceSettings(servicesettings types.WatsonxServiceSettingsVariant) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.WatsonxServiceSettingsCaster() + + return r +} diff --git a/typedapi/inference/putwatsonx/request.go b/typedapi/inference/putwatsonx/request.go new file mode 100644 index 0000000000..059f58eab9 --- /dev/null +++ b/typedapi/inference/putwatsonx/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putwatsonx + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +// Request holds the request body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_watsonx/PutWatsonxRequest.ts#L24-L70 +type Request struct { + + // Service The type of service supported for the specified task type. In this case, + // `watsonxai`. + Service servicetype.ServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `watsonxai` service. + ServiceSettings types.WatsonxServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putwatsonx request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/inference/putwatsonx/response.go b/typedapi/inference/putwatsonx/response.go new file mode 100644 index 0000000000..8347540dec --- /dev/null +++ b/typedapi/inference/putwatsonx/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putwatsonx + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_watsonx/PutWatsonxResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/rerank/request.go b/typedapi/inference/rerank/request.go new file mode 100644 index 0000000000..daf8cc0077 --- /dev/null +++ b/typedapi/inference/rerank/request.go @@ -0,0 +1,121 @@ +// 
// Request holds the request body struct for the package rerank
//
// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/rerank/RerankRequest.ts#L25-L72
type Request struct {

	// Input The text on which you want to perform the inference task.
	// It can be a single string or an array.
	//
	// > info
	// > Inference endpoints for the `completion` task type currently only support a
	// single string as input.
	Input []string `json:"input"`
	// Query Query input.
	Query string `json:"query"`
	// TaskSettings Task settings for the individual inference request.
	// These settings are specific to the task type you specified and override the
	// task settings specified when initializing the service.
	TaskSettings json.RawMessage `json:"task_settings,omitempty"`
}

// NewRequest returns a Request
func NewRequest() *Request {
	r := &Request{}

	return r
}

// FromJSON allows to load an arbitrary json into the request structure
func (r *Request) FromJSON(data string) (*Request, error) {
	var req Request
	err := json.Unmarshal([]byte(data), &req)

	if err != nil {
		return nil, fmt.Errorf("could not deserialise json into Rerank request: %w", err)
	}

	return &req, nil
}

// UnmarshalJSON decodes the request body, accepting either a single string or
// an array of strings for the "input" field.
func (s *Request) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))

	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		switch t {

		case "input":
			rawMsg := json.RawMessage{}
			// BUGFIX: the decode error was silently ignored; a malformed
			// "input" value now surfaces instead of yielding an empty slice.
			if err := dec.Decode(&rawMsg); err != nil {
				return fmt.Errorf("%s | %w", "Input", err)
			}
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				// Single string: append it as a one-element slice.
				o := new(string)
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return fmt.Errorf("%s | %w", "Input", err)
				}

				s.Input = append(s.Input, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil {
					return fmt.Errorf("%s | %w", "Input", err)
				}
			}

		case "query":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return fmt.Errorf("%s | %w", "Query", err)
			}
			// Unquote the JSON string; fall back to the raw bytes on failure.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Query = o

		case "task_settings":
			if err := dec.Decode(&s.TaskSettings); err != nil {
				return fmt.Errorf("%s | %w", "TaskSettings", err)
			}

		}
	}
	return nil
}
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Perform rereanking inference on the service +package rerank + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Rerank struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRerank type alias for index. +type NewRerank func(inferenceid string) *Rerank + +// NewRerankFunc returns a new instance of Rerank with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewRerankFunc(tp elastictransport.Interface) NewRerank { + return func(inferenceid string) *Rerank { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform rereanking inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *Rerank { + r := &Rerank{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Rerank) Raw(raw io.Reader) *Rerank { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Rerank) Request(req *Request) *Rerank { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Rerank) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Rerank: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Rerank) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Rerank query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a rerank.Response +func (r Rerank) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Rerank headers map. +func (r *Rerank) Header(key, value string) *Rerank { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *Rerank) _inferenceid(inferenceid string) *Rerank { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout The amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Rerank) Timeout(duration string) *Rerank { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Rerank) ErrorTrace(errortrace bool) *Rerank { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Rerank) FilterPath(filterpaths ...string) *Rerank { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Rerank) Human(human bool) *Rerank { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Rerank) Pretty(pretty bool) *Rerank { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// > info +// > Inference endpoints for the `completion` task type currently only support a +// single string as input. +// API name: input +func (r *Rerank) Input(inputs ...string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Query input. +// API name: query +func (r *Rerank) Query(query string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Task settings for the individual inference request. +// These settings are specific to the task type you specified and override the +// task settings specified when initializing the service. 
+// API name: task_settings +func (r *Rerank) TaskSettings(tasksettings json.RawMessage) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/rerank/response.go b/typedapi/inference/rerank/response.go new file mode 100644 index 0000000000..b24a96b04c --- /dev/null +++ b/typedapi/inference/rerank/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package rerank + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package rerank +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/rerank/RerankResponse.ts#L22-L24 + +type Response []types.RankedDocument + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/sparseembedding/request.go b/typedapi/inference/sparseembedding/request.go new file mode 100644 index 0000000000..8de3f506f5 --- /dev/null +++ b/typedapi/inference/sparseembedding/request.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package sparseembedding + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package sparseembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/sparse_embedding/SparseEmbeddingRequest.ts#L25-L63 +type Request struct { + + // Input Inference input. + // Either a string or an array of strings. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Sparseembedding request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git 
a/typedapi/inference/sparseembedding/response.go b/typedapi/inference/sparseembedding/response.go new file mode 100644 index 0000000000..2b121395a7 --- /dev/null +++ b/typedapi/inference/sparseembedding/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package sparseembedding + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package sparseembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/sparse_embedding/SparseEmbeddingResponse.ts#L22-L24 + +type Response []types.SparseEmbeddingResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/sparseembedding/sparse_embedding.go b/typedapi/inference/sparseembedding/sparse_embedding.go new file mode 100644 index 0000000000..b3afc26725 --- /dev/null +++ b/typedapi/inference/sparseembedding/sparse_embedding.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Perform sparse embedding inference on the service +package sparseembedding + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SparseEmbedding struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSparseEmbedding type alias for index. +type NewSparseEmbedding func(inferenceid string) *SparseEmbedding + +// NewSparseEmbeddingFunc returns a new instance of SparseEmbedding with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewSparseEmbeddingFunc(tp elastictransport.Interface) NewSparseEmbedding { + return func(inferenceid string) *SparseEmbedding { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform sparse embedding inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *SparseEmbedding { + r := &SparseEmbedding{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SparseEmbedding) Raw(raw io.Reader) *SparseEmbedding { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SparseEmbedding) Request(req *Request) *SparseEmbedding { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SparseEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SparseEmbedding: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SparseEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SparseEmbedding query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a sparseembedding.Response +func (r SparseEmbedding) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SparseEmbedding headers map. +func (r *SparseEmbedding) Header(key, value string) *SparseEmbedding { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *SparseEmbedding) _inferenceid(inferenceid string) *SparseEmbedding { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *SparseEmbedding) Timeout(duration string) *SparseEmbedding { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SparseEmbedding) ErrorTrace(errortrace bool) *SparseEmbedding { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SparseEmbedding) FilterPath(filterpaths ...string) *SparseEmbedding { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SparseEmbedding) Human(human bool) *SparseEmbedding { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *SparseEmbedding) Pretty(pretty bool) *SparseEmbedding { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. +// API name: input +func (r *SparseEmbedding) Input(inputs ...string) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *SparseEmbedding) TaskSettings(tasksettings json.RawMessage) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/streamcompletion/request.go b/typedapi/inference/streamcompletion/request.go new file mode 100644 index 0000000000..9861b6fddb --- /dev/null +++ b/typedapi/inference/streamcompletion/request.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package streamcompletion + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/stream_completion/StreamInferenceRequest.ts#L24-L63 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // NOTE: Inference endpoints for the completion task type currently only support + // a single string as input. 
+ Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Streamcompletion request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/typedapi/inference/streamcompletion/response.go b/typedapi/inference/streamcompletion/response.go new file mode 100644 index 0000000000..0d55186cb9 --- /dev/null +++ b/typedapi/inference/streamcompletion/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package streamcompletion + +// Response holds the response body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/stream_completion/StreamInferenceResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/streamcompletion/stream_completion.go b/typedapi/inference/streamcompletion/stream_completion.go new file mode 100644 index 0000000000..a95269de1c --- /dev/null +++ b/typedapi/inference/streamcompletion/stream_completion.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. 
+package streamcompletion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StreamCompletion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStreamCompletion type alias for index. +type NewStreamCompletion func(inferenceid string) *StreamCompletion + +// NewStreamCompletionFunc returns a new instance of StreamCompletion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStreamCompletionFunc(tp elastictransport.Interface) NewStreamCompletion { + return func(inferenceid string) *StreamCompletion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. 
However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference +func New(tp elastictransport.Interface) *StreamCompletion { + r := &StreamCompletion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StreamCompletion) Raw(raw io.Reader) *StreamCompletion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StreamCompletion) Request(req *Request) *StreamCompletion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *StreamCompletion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StreamCompletion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StreamCompletion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StreamCompletion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a streamcompletion.Response +func (r StreamCompletion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + 
} + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StreamCompletion headers map. +func (r *StreamCompletion) Header(key, value string) *StreamCompletion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *StreamCompletion) _inferenceid(inferenceid string) *StreamCompletion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StreamCompletion) ErrorTrace(errortrace bool) *StreamCompletion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *StreamCompletion) FilterPath(filterpaths ...string) *StreamCompletion { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StreamCompletion) Human(human bool) *StreamCompletion { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *StreamCompletion) Pretty(pretty bool) *StreamCompletion { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// NOTE: Inference endpoints for the completion task type currently only support +// a single string as input. 
+// API name: input +func (r *StreamCompletion) Input(inputs ...string) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *StreamCompletion) TaskSettings(tasksettings json.RawMessage) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/types/matrixaggregation.go b/typedapi/inference/textembedding/request.go similarity index 51% rename from typedapi/types/matrixaggregation.go rename to typedapi/inference/textembedding/request.go index 000b5d2540..e65c35c4be 100644 --- a/typedapi/types/matrixaggregation.go +++ b/typedapi/inference/textembedding/request.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package types +package textembedding import ( "bytes" @@ -28,19 +28,38 @@ import ( "io" ) -// MatrixAggregation type. +// Request holds the request body struct for the package textembedding // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/matrix.ts#L26-L36 -type MatrixAggregation struct { - // Fields An array of fields for computing the statistics. - Fields []string `json:"fields,omitempty"` - // Missing The value to apply to documents that do not have a value. - // By default, documents without a value are ignored. 
- Missing map[string]Float64 `json:"missing,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/text_embedding/TextEmbeddingRequest.ts#L25-L63 +type Request struct { + + // Input Inference input. + // Either a string or an array of strings. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r } -func (s *MatrixAggregation) UnmarshalJSON(data []byte) error { +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Textembedding request: %w", err) + } + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,40 +73,28 @@ func (s *MatrixAggregation) UnmarshalJSON(data []byte) error { switch t { - case "fields": + case "input": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) if !bytes.HasPrefix(rawMsg, []byte("[")) { o := new(string) if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) + return fmt.Errorf("%s | %w", "Input", err) } - s.Fields = append(s.Fields, *o) + s.Input = append(s.Input, *o) } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) } } - case "missing": - if s.Missing == nil { - s.Missing = make(map[string]Float64, 0) - } - if err := dec.Decode(&s.Missing); err != nil { - return fmt.Errorf("%s | %w", "Missing", err) 
+ case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) } } } return nil } - -// NewMatrixAggregation returns a MatrixAggregation. -func NewMatrixAggregation() *MatrixAggregation { - r := &MatrixAggregation{ - Missing: make(map[string]Float64, 0), - } - - return r -} diff --git a/typedapi/inference/textembedding/response.go b/typedapi/inference/textembedding/response.go new file mode 100644 index 0000000000..5a52443201 --- /dev/null +++ b/typedapi/inference/textembedding/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package textembedding + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package textembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/text_embedding/TextEmbeddingResponse.ts#L22-L24 +type Response struct { + AdditionalTextEmbeddingInferenceResultProperty map[string]json.RawMessage `json:"-"` + TextEmbedding []types.TextEmbeddingResult `json:"text_embedding,omitempty"` + TextEmbeddingBits []types.TextEmbeddingByteResult `json:"text_embedding_bits,omitempty"` + TextEmbeddingBytes []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + AdditionalTextEmbeddingInferenceResultProperty: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/typedapi/inference/inference/inference.go b/typedapi/inference/textembedding/text_embedding.go similarity index 70% rename from typedapi/inference/inference/inference.go rename to typedapi/inference/textembedding/text_embedding.go index 885f5beaee..d2e47210fc 100644 --- a/typedapi/inference/inference/inference.go +++ b/typedapi/inference/textembedding/text_embedding.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Perform inference on the service -package inference +// Perform text embedding inference on the service +package textembedding import ( gobytes "bytes" @@ -38,15 +38,13 @@ import ( ) const ( - tasktypeMask = iota + 1 - - inferenceidMask + inferenceidMask = iota + 1 ) // ErrBuildPath is returned in case of missing parameters within the build of the request. var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") -type Inference struct { +type TextEmbedding struct { transport elastictransport.Interface headers http.Header @@ -61,7 +59,6 @@ type Inference struct { paramSet int - tasktype string inferenceid string spanStarted bool @@ -69,13 +66,13 @@ type Inference struct { instrument elastictransport.Instrumentation } -// NewInference type alias for index. -type NewInference func(inferenceid string) *Inference +// NewTextEmbedding type alias for index. +type NewTextEmbedding func(inferenceid string) *TextEmbedding -// NewInferenceFunc returns a new instance of Inference with the provided transport. +// NewTextEmbeddingFunc returns a new instance of TextEmbedding with the provided transport. // Used in the index of the library this allows to retrieve every apis in once place. 
-func NewInferenceFunc(tp elastictransport.Interface) NewInference { - return func(inferenceid string) *Inference { +func NewTextEmbeddingFunc(tp elastictransport.Interface) NewTextEmbedding { + return func(inferenceid string) *TextEmbedding { n := New(tp) n._inferenceid(inferenceid) @@ -84,18 +81,16 @@ func NewInferenceFunc(tp elastictransport.Interface) NewInference { } } -// Perform inference on the service +// Perform text embedding inference on the service // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html -func New(tp elastictransport.Interface) *Inference { - r := &Inference{ +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *TextEmbedding { + r := &TextEmbedding{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -109,14 +104,14 @@ func New(tp elastictransport.Interface) *Inference { // Raw takes a json payload as input which is then passed to the http.Request // If specified Raw takes precedence on Request method. -func (r *Inference) Raw(raw io.Reader) *Inference { +func (r *TextEmbedding) Raw(raw io.Reader) *TextEmbedding { r.raw = raw return r } // Request allows to set the request property with the appropriate payload. -func (r *Inference) Request(req *Request) *Inference { +func (r *TextEmbedding) Request(req *Request) *TextEmbedding { r.req = req return r @@ -124,7 +119,7 @@ func (r *Inference) Request(req *Request) *Inference { // HttpRequest returns the http.Request object built from the // given parameters. 
-func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { +func (r *TextEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { var path strings.Builder var method string var req *http.Request @@ -145,7 +140,7 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { data, err := json.Marshal(r.req) if err != nil { - return nil, fmt.Errorf("could not serialise request for Inference: %w", err) + return nil, fmt.Errorf("could not serialise request for TextEmbedding: %w", err) } r.buf.Write(data) @@ -163,22 +158,7 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { path.WriteString("/") path.WriteString("_inference") path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) - } - path.WriteString(r.inferenceid) - - method = http.MethodPost - case r.paramSet == tasktypeMask|inferenceidMask: - path.WriteString("/") - path.WriteString("_inference") - path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "tasktype", r.tasktype) - } - path.WriteString(r.tasktype) + path.WriteString("text_embedding") path.WriteString("/") if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -222,11 +202,11 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { } // Perform runs the http.Request through the provided transport and returns an http.Response. 
-func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) { +func (r TextEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { var ctx context.Context if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { if r.spanStarted == false { - ctx := instrument.Start(providedCtx, "inference.inference") + ctx := instrument.Start(providedCtx, "inference.text_embedding") defer instrument.Close(ctx) } } @@ -243,17 +223,17 @@ func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) } if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.BeforeRequest(req, "inference.inference") - if reader := instrument.RecordRequestBody(ctx, "inference.inference", r.raw); reader != nil { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.text_embedding", r.raw); reader != nil { req.Body = reader } } res, err := r.transport.Perform(req) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.AfterRequest(req, "elasticsearch", "inference.inference") + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") } if err != nil { - localErr := fmt.Errorf("an error happened during the Inference query execution: %w", err) + localErr := fmt.Errorf("an error happened during the TextEmbedding query execution: %w", err) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, localErr) } @@ -263,12 +243,12 @@ func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) return res, nil } -// Do runs the request through the transport, handle the response and returns a inference.Response -func (r Inference) Do(providedCtx context.Context) (*Response, error) { +// Do runs the request through the transport, handle the response and returns a textembedding.Response +func (r TextEmbedding) Do(providedCtx 
context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "inference.inference") + ctx = instrument.Start(providedCtx, "inference.text_embedding") defer instrument.Close(ctx) } if ctx == nil { @@ -317,25 +297,16 @@ func (r Inference) Do(providedCtx context.Context) (*Response, error) { return nil, errorResponse } -// Header set a key, value pair in the Inference headers map. -func (r *Inference) Header(key, value string) *Inference { +// Header set a key, value pair in the TextEmbedding headers map. +func (r *TextEmbedding) Header(key, value string) *TextEmbedding { r.headers.Set(key, value) return r } -// TaskType The task type -// API Name: tasktype -func (r *Inference) TaskType(tasktype string) *Inference { - r.paramSet |= tasktypeMask - r.tasktype = tasktype - - return r -} - // InferenceId The inference Id // API Name: inferenceid -func (r *Inference) _inferenceid(inferenceid string) *Inference { +func (r *TextEmbedding) _inferenceid(inferenceid string) *TextEmbedding { r.paramSet |= inferenceidMask r.inferenceid = inferenceid @@ -344,7 +315,7 @@ func (r *Inference) _inferenceid(inferenceid string) *Inference { // Timeout Specifies the amount of time to wait for the inference request to complete. // API name: timeout -func (r *Inference) Timeout(duration string) *Inference { +func (r *TextEmbedding) Timeout(duration string) *TextEmbedding { r.values.Set("timeout", duration) return r @@ -353,7 +324,7 @@ func (r *Inference) Timeout(duration string) *Inference { // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace -func (r *Inference) ErrorTrace(errortrace bool) *Inference { +func (r *TextEmbedding) ErrorTrace(errortrace bool) *TextEmbedding { r.values.Set("error_trace", strconv.FormatBool(errortrace)) return r @@ -362,7 +333,7 @@ func (r *Inference) ErrorTrace(errortrace bool) *Inference { // FilterPath Comma-separated list of filters in dot notation which reduce the response // returned by Elasticsearch. // API name: filter_path -func (r *Inference) FilterPath(filterpaths ...string) *Inference { +func (r *TextEmbedding) FilterPath(filterpaths ...string) *TextEmbedding { tmp := []string{} for _, item := range filterpaths { tmp = append(tmp, fmt.Sprintf("%v", item)) @@ -379,7 +350,7 @@ func (r *Inference) FilterPath(filterpaths ...string) *Inference { // consumed // only by machines. // API name: human -func (r *Inference) Human(human bool) *Inference { +func (r *TextEmbedding) Human(human bool) *TextEmbedding { r.values.Set("human", strconv.FormatBool(human)) return r @@ -388,34 +359,34 @@ func (r *Inference) Human(human bool) *Inference { // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use // this option for debugging only. // API name: pretty -func (r *Inference) Pretty(pretty bool) *Inference { +func (r *TextEmbedding) Pretty(pretty bool) *TextEmbedding { r.values.Set("pretty", strconv.FormatBool(pretty)) return r } -// Input Inference input. +// Inference input. // Either a string or an array of strings. // API name: input -func (r *Inference) Input(inputs ...string) *Inference { +func (r *TextEmbedding) Input(inputs ...string) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) r.req.Input = inputs return r } -// Query Query input, required for rerank task. -// Not required for other tasks. 
-// API name: query -func (r *Inference) Query(query string) *Inference { - - r.req.Query = &query - - return r -} - -// TaskSettings Optional task settings +// Optional task settings // API name: task_settings -func (r *Inference) TaskSettings(tasksettings json.RawMessage) *Inference { +func (r *TextEmbedding) TaskSettings(tasksettings json.RawMessage) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TaskSettings = tasksettings return r diff --git a/typedapi/inference/update/request.go b/typedapi/inference/update/request.go new file mode 100644 index 0000000000..74b1884edd --- /dev/null +++ b/typedapi/inference/update/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package update + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/update/UpdateInferenceRequest.ts#L25-L61 +type Request = types.InferenceEndpoint + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewInferenceEndpoint() + + return r +} diff --git a/typedapi/inference/update/response.go b/typedapi/inference/update/response.go new file mode 100644 index 0000000000..6d304d7974 --- /dev/null +++ b/typedapi/inference/update/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package update + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/update/UpdateInferenceResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/update/update.go b/typedapi/inference/update/update.go new file mode 100644 index 0000000000..0a9ffa8a6d --- /dev/null +++ b/typedapi/inference/update/update.go @@ -0,0 +1,469 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +package update + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 + + tasktypeMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Update struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + tasktype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdate type alias for index. +type NewUpdate func(inferenceid string) *Update + +// NewUpdateFunc returns a new instance of Update with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { + return func(inferenceid string) *Update { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update +func New(tp elastictransport.Interface) *Update { + r := &Update{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Update) Raw(raw io.Reader) *Update { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Update) Request(req *Request) *Update { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Update: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + case r.paramSet == tasktypeMask|inferenceidMask: + 
path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Update) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Update query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a update.Response +func (r Update) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Update headers map. +func (r *Update) Header(key, value string) *Update { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier of the inference endpoint. +// API Name: inferenceid +func (r *Update) _inferenceid(inferenceid string) *Update { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// TaskType The type of inference task that the model performs. +// API Name: tasktype +func (r *Update) TaskType(tasktype string) *Update { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Update) ErrorTrace(errortrace bool) *Update { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Update) FilterPath(filterpaths ...string) *Update { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Update) Human(human bool) *Update { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Update) Pretty(pretty bool) *Update { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Chunking configuration object +// API name: chunking_settings +func (r *Update) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type +// API name: service +func (r *Update) Service(service string) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Service = service + + return r +} + +// Settings specific to the service +// API name: service_settings +func (r *Update) ServiceSettings(servicesettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = servicesettings + + return r +} + +// Task settings specific to the service and task type +// API name: 
task_settings +func (r *Update) TaskSettings(tasksettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go b/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go index beb00d219d..6afe2e7a80 100644 --- a/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go +++ b/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a geoip database configuration. +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. package deletegeoipdatabase import ( @@ -76,9 +78,11 @@ func NewDeleteGeoipDatabaseFunc(tp elastictransport.Interface) NewDeleteGeoipDat } } -// Deletes a geoip database configuration. +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-geoip-database-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database func New(tp elastictransport.Interface) *DeleteGeoipDatabase { r := &DeleteGeoipDatabase{ transport: tp, @@ -301,7 +305,7 @@ func (r *DeleteGeoipDatabase) _id(id string) *DeleteGeoipDatabase { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // If no response is received before the timeout expires, the request fails and // returns an error. 
// API name: master_timeout @@ -311,8 +315,8 @@ func (r *DeleteGeoipDatabase) MasterTimeout(duration string) *DeleteGeoipDatabas return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. // API name: timeout func (r *DeleteGeoipDatabase) Timeout(duration string) *DeleteGeoipDatabase { r.values.Set("timeout", duration) diff --git a/typedapi/ingest/deletegeoipdatabase/response.go b/typedapi/ingest/deletegeoipdatabase/response.go index 6392645454..8f7074b81c 100644 --- a/typedapi/ingest/deletegeoipdatabase/response.go +++ b/typedapi/ingest/deletegeoipdatabase/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletegeoipdatabase // Response holds the response body struct for the package deletegeoipdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/delete_geoip_database/DeleteGeoipDatabaseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/delete_geoip_database/DeleteGeoipDatabaseResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/unfreeze/unfreeze.go b/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go similarity index 62% rename from typedapi/indices/unfreeze/unfreeze.go rename to typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go index 8bfea1a162..4a874ca428 100644 --- a/typedapi/indices/unfreeze/unfreeze.go +++ b/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Unfreezes an index. -package unfreeze +// Delete IP geolocation database configurations. +package deleteiplocationdatabase import ( "context" @@ -34,17 +34,16 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( - indexMask = iota + 1 + idMask = iota + 1 ) // ErrBuildPath is returned in case of missing parameters within the build of the request. var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") -type Unfreeze struct { +type DeleteIpLocationDatabase struct { transport elastictransport.Interface headers http.Header @@ -55,33 +54,33 @@ type Unfreeze struct { paramSet int - index string + id string spanStarted bool instrument elastictransport.Instrumentation } -// NewUnfreeze type alias for index. -type NewUnfreeze func(index string) *Unfreeze +// NewDeleteIpLocationDatabase type alias for index. +type NewDeleteIpLocationDatabase func(id string) *DeleteIpLocationDatabase -// NewUnfreezeFunc returns a new instance of Unfreeze with the provided transport. 
+// NewDeleteIpLocationDatabaseFunc returns a new instance of DeleteIpLocationDatabase with the provided transport. // Used in the index of the library this allows to retrieve every apis in once place. -func NewUnfreezeFunc(tp elastictransport.Interface) NewUnfreeze { - return func(index string) *Unfreeze { +func NewDeleteIpLocationDatabaseFunc(tp elastictransport.Interface) NewDeleteIpLocationDatabase { + return func(id string) *DeleteIpLocationDatabase { n := New(tp) - n._index(index) + n._id(id) return n } } -// Unfreezes an index. +// Delete IP geolocation database configurations. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html -func New(tp elastictransport.Interface) *Unfreeze { - r := &Unfreeze{ +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database +func New(tp elastictransport.Interface) *DeleteIpLocationDatabase { + r := &DeleteIpLocationDatabase{ transport: tp, values: make(url.Values), headers: make(http.Header), @@ -98,7 +97,7 @@ func New(tp elastictransport.Interface) *Unfreeze { // HttpRequest returns the http.Request object built from the // given parameters. 
-func (r *Unfreeze) HttpRequest(ctx context.Context) (*http.Request, error) { +func (r *DeleteIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { var path strings.Builder var method string var req *http.Request @@ -108,17 +107,21 @@ func (r *Unfreeze) HttpRequest(ctx context.Context) (*http.Request, error) { r.path.Scheme = "http" switch { - case r.paramSet == indexMask: + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") path.WriteString("/") if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "index", r.index) + instrument.RecordPathPart(ctx, "id", r.id) } - path.WriteString(r.index) - path.WriteString("/") - path.WriteString("_unfreeze") + path.WriteString(r.id) - method = http.MethodPost + method = http.MethodDelete } r.path.Path = path.String() @@ -148,11 +151,11 @@ func (r *Unfreeze) HttpRequest(ctx context.Context) (*http.Request, error) { } // Perform runs the http.Request through the provided transport and returns an http.Response. 
-func (r Unfreeze) Perform(providedCtx context.Context) (*http.Response, error) { +func (r DeleteIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { var ctx context.Context if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { if r.spanStarted == false { - ctx := instrument.Start(providedCtx, "indices.unfreeze") + ctx := instrument.Start(providedCtx, "ingest.delete_ip_location_database") defer instrument.Close(ctx) } } @@ -169,17 +172,17 @@ func (r Unfreeze) Perform(providedCtx context.Context) (*http.Response, error) { } if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.BeforeRequest(req, "indices.unfreeze") - if reader := instrument.RecordRequestBody(ctx, "indices.unfreeze", r.raw); reader != nil { + instrument.BeforeRequest(req, "ingest.delete_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_ip_location_database", r.raw); reader != nil { req.Body = reader } } res, err := r.transport.Perform(req) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.AfterRequest(req, "elasticsearch", "indices.unfreeze") + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_ip_location_database") } if err != nil { - localErr := fmt.Errorf("an error happened during the Unfreeze query execution: %w", err) + localErr := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution: %w", err) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, localErr) } @@ -189,12 +192,12 @@ func (r Unfreeze) Perform(providedCtx context.Context) (*http.Response, error) { return res, nil } -// Do runs the request through the transport, handle the response and returns a unfreeze.Response -func (r Unfreeze) Do(providedCtx context.Context) (*Response, error) { +// Do runs the request through the transport, handle the response and returns a 
deleteiplocationdatabase.Response +func (r DeleteIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "indices.unfreeze") + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") defer instrument.Close(ctx) } if ctx == nil { @@ -245,11 +248,11 @@ func (r Unfreeze) Do(providedCtx context.Context) (*Response, error) { // IsSuccess allows to run a query with a context and retrieve the result as a boolean. // This only exists for endpoints without a request payload and allows for quick control flow. -func (r Unfreeze) IsSuccess(providedCtx context.Context) (bool, error) { +func (r DeleteIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "indices.unfreeze") + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") defer instrument.Close(ctx) } if ctx == nil { @@ -272,7 +275,7 @@ func (r Unfreeze) IsSuccess(providedCtx context.Context) (bool, error) { } if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the Unfreeze query execution, status code: %d", res.StatusCode) + err := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution, status code: %d", res.StatusCode) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } @@ -282,92 +285,48 @@ func (r Unfreeze) IsSuccess(providedCtx context.Context) (bool, error) { return false, nil } -// Header set a key, value pair in the Unfreeze headers map. -func (r *Unfreeze) Header(key, value string) *Unfreeze { +// Header set a key, value pair in the DeleteIpLocationDatabase headers map. 
+func (r *DeleteIpLocationDatabase) Header(key, value string) *DeleteIpLocationDatabase { r.headers.Set(key, value) return r } -// Index Identifier for the index. -// API Name: index -func (r *Unfreeze) _index(index string) *Unfreeze { - r.paramSet |= indexMask - r.index = index - - return r -} - -// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index -// alias, or `_all` value targets only missing or closed indices. -// This behavior applies even if the request targets other open indices. -// API name: allow_no_indices -func (r *Unfreeze) AllowNoIndices(allownoindices bool) *Unfreeze { - r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) - - return r -} - -// ExpandWildcards Type of index that wildcard patterns can match. -// If the request can target data streams, this argument determines whether -// wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. -// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -// API name: expand_wildcards -func (r *Unfreeze) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Unfreeze { - tmp := []string{} - for _, item := range expandwildcards { - tmp = append(tmp, item.String()) - } - r.values.Set("expand_wildcards", strings.Join(tmp, ",")) +// Id A comma-separated list of IP location database configurations. +// API Name: id +func (r *DeleteIpLocationDatabase) _id(id string) *DeleteIpLocationDatabase { + r.paramSet |= idMask + r.id = id return r } -// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed -// index. -// API name: ignore_unavailable -func (r *Unfreeze) IgnoreUnavailable(ignoreunavailable bool) *Unfreeze { - r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. 
// If no response is received before the timeout expires, the request fails and // returns an error. +// A value of `-1` indicates that the request should never time out. // API name: master_timeout -func (r *Unfreeze) MasterTimeout(duration string) *Unfreeze { +func (r *DeleteIpLocationDatabase) MasterTimeout(duration string) *DeleteIpLocationDatabase { r.values.Set("master_timeout", duration) return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. +// A value of `-1` indicates that the request should never time out. // API name: timeout -func (r *Unfreeze) Timeout(duration string) *Unfreeze { +func (r *DeleteIpLocationDatabase) Timeout(duration string) *DeleteIpLocationDatabase { r.values.Set("timeout", duration) return r } -// WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). -// API name: wait_for_active_shards -func (r *Unfreeze) WaitForActiveShards(waitforactiveshards string) *Unfreeze { - r.values.Set("wait_for_active_shards", waitforactiveshards) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace -func (r *Unfreeze) ErrorTrace(errortrace bool) *Unfreeze { +func (r *DeleteIpLocationDatabase) ErrorTrace(errortrace bool) *DeleteIpLocationDatabase { r.values.Set("error_trace", strconv.FormatBool(errortrace)) return r @@ -376,7 +335,7 @@ func (r *Unfreeze) ErrorTrace(errortrace bool) *Unfreeze { // FilterPath Comma-separated list of filters in dot notation which reduce the response // returned by Elasticsearch. 
// API name: filter_path -func (r *Unfreeze) FilterPath(filterpaths ...string) *Unfreeze { +func (r *DeleteIpLocationDatabase) FilterPath(filterpaths ...string) *DeleteIpLocationDatabase { tmp := []string{} for _, item := range filterpaths { tmp = append(tmp, fmt.Sprintf("%v", item)) @@ -393,7 +352,7 @@ func (r *Unfreeze) FilterPath(filterpaths ...string) *Unfreeze { // consumed // only by machines. // API name: human -func (r *Unfreeze) Human(human bool) *Unfreeze { +func (r *DeleteIpLocationDatabase) Human(human bool) *DeleteIpLocationDatabase { r.values.Set("human", strconv.FormatBool(human)) return r @@ -402,7 +361,7 @@ func (r *Unfreeze) Human(human bool) *Unfreeze { // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use // this option for debugging only. // API name: pretty -func (r *Unfreeze) Pretty(pretty bool) *Unfreeze { +func (r *DeleteIpLocationDatabase) Pretty(pretty bool) *DeleteIpLocationDatabase { r.values.Set("pretty", strconv.FormatBool(pretty)) return r diff --git a/typedapi/ingest/deleteiplocationdatabase/response.go b/typedapi/ingest/deleteiplocationdatabase/response.go new file mode 100644 index 0000000000..d095135cea --- /dev/null +++ b/typedapi/ingest/deleteiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package deleteiplocationdatabase + +// Response holds the response body struct for the package deleteiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/delete_ip_location_database/DeleteIpLocationDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/deletepipeline/delete_pipeline.go b/typedapi/ingest/deletepipeline/delete_pipeline.go index 0f640e8d30..2aa66e2be4 100644 --- a/typedapi/ingest/deletepipeline/delete_pipeline.go +++ b/typedapi/ingest/deletepipeline/delete_pipeline.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes one or more existing ingest pipeline. +// Delete pipelines. +// Delete one or more ingest pipelines. package deletepipeline import ( @@ -76,9 +77,10 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { } } -// Deletes one or more existing ingest pipeline. +// Delete pipelines. +// Delete one or more ingest pipelines. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline func New(tp elastictransport.Interface) *DeletePipeline { r := &DeletePipeline{ transport: tp, diff --git a/typedapi/ingest/deletepipeline/response.go b/typedapi/ingest/deletepipeline/response.go index 152ba37a58..19afb08dfb 100644 --- a/typedapi/ingest/deletepipeline/response.go +++ b/typedapi/ingest/deletepipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletepipeline // Response holds the response body struct for the package deletepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ingest/geoipstats/geo_ip_stats.go b/typedapi/ingest/geoipstats/geo_ip_stats.go index 09d8a9cb5c..e5f18acf40 100644 --- a/typedapi/ingest/geoipstats/geo_ip_stats.go +++ b/typedapi/ingest/geoipstats/geo_ip_stats.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Gets download statistics for GeoIP2 databases used with the geoip processor. +// Get GeoIP statistics. +// Get download statistics for GeoIP2 databases that are used with the GeoIP +// processor. package geoipstats import ( @@ -68,7 +70,9 @@ func NewGeoIpStatsFunc(tp elastictransport.Interface) NewGeoIpStats { } } -// Gets download statistics for GeoIP2 databases used with the geoip processor. +// Get GeoIP statistics. +// Get download statistics for GeoIP2 databases that are used with the GeoIP +// processor. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html func New(tp elastictransport.Interface) *GeoIpStats { diff --git a/typedapi/ingest/geoipstats/response.go b/typedapi/ingest/geoipstats/response.go index 4fd3ab154c..7746332b5a 100644 --- a/typedapi/ingest/geoipstats/response.go +++ b/typedapi/ingest/geoipstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package geoipstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package geoipstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 type Response struct { // Nodes Downloaded GeoIP2 databases for each node. 
diff --git a/typedapi/ingest/getgeoipdatabase/get_geoip_database.go b/typedapi/ingest/getgeoipdatabase/get_geoip_database.go index 07ed9f7b1c..d396892cbf 100644 --- a/typedapi/ingest/getgeoipdatabase/get_geoip_database.go +++ b/typedapi/ingest/getgeoipdatabase/get_geoip_database.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about one or more geoip database configurations. +// Get GeoIP database configurations. +// +// Get information about one or more IP geolocation database configurations. package getgeoipdatabase import ( @@ -74,9 +76,11 @@ func NewGetGeoipDatabaseFunc(tp elastictransport.Interface) NewGetGeoipDatabase } } -// Returns information about one or more geoip database configurations. +// Get GeoIP database configurations. +// +// Get information about one or more IP geolocation database configurations. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-geoip-database-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database func New(tp elastictransport.Interface) *GetGeoipDatabase { r := &GetGeoipDatabase{ transport: tp, @@ -299,7 +303,7 @@ func (r *GetGeoipDatabase) Header(key, value string) *GetGeoipDatabase { return r } -// Id Comma-separated list of database configuration IDs to retrieve. +// Id A comma-separated list of database configuration IDs to retrieve. // Wildcard (`*`) expressions are supported. // To get all database configurations, omit this parameter or use `*`. // API Name: id @@ -310,16 +314,6 @@ func (r *GetGeoipDatabase) Id(id string) *GetGeoipDatabase { return r } -// MasterTimeout Period to wait for a connection to the master node. 
-// If no response is received before the timeout expires, the request fails and -// returns an error. -// API name: master_timeout -func (r *GetGeoipDatabase) MasterTimeout(duration string) *GetGeoipDatabase { - r.values.Set("master_timeout", duration) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ingest/getgeoipdatabase/response.go b/typedapi/ingest/getgeoipdatabase/response.go index afdf91e31c..cf5b19869f 100644 --- a/typedapi/ingest/getgeoipdatabase/response.go +++ b/typedapi/ingest/getgeoipdatabase/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getgeoipdatabase @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package getgeoipdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L25-L27 type Response struct { - Databases []types.DatabaseConfigurationMetadata `json:"databases"` + Databases []types.GeoipDatabaseConfigurationMetadata `json:"databases"` } // NewResponse returns a Response diff --git a/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go b/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go new file mode 100644 index 0000000000..ddf38d9931 --- /dev/null +++ b/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get IP geolocation database configurations. +package getiplocationdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetIpLocationDatabase type alias for index. +type NewGetIpLocationDatabase func() *GetIpLocationDatabase + +// NewGetIpLocationDatabaseFunc returns a new instance of GetIpLocationDatabase with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetIpLocationDatabaseFunc(tp elastictransport.Interface) NewGetIpLocationDatabase { + return func() *GetIpLocationDatabase { + n := New(tp) + + return n + } +} + +// Get IP geolocation database configurations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database +func New(tp elastictransport.Interface) *GetIpLocationDatabase { + r := &GetIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = 
http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.get_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getiplocationdatabase.Response +func (r GetIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context 
+ r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetIpLocationDatabase headers map. +func (r *GetIpLocationDatabase) Header(key, value string) *GetIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id Comma-separated list of database configuration IDs to retrieve. +// Wildcard (`*`) expressions are supported. +// To get all database configurations, omit this parameter or use `*`. +// API Name: id +func (r *GetIpLocationDatabase) Id(id string) *GetIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. 
+// API name: master_timeout +func (r *GetIpLocationDatabase) MasterTimeout(duration string) *GetIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetIpLocationDatabase) ErrorTrace(errortrace bool) *GetIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetIpLocationDatabase) FilterPath(filterpaths ...string) *GetIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetIpLocationDatabase) Human(human bool) *GetIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetIpLocationDatabase) Pretty(pretty bool) *GetIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/inference/inference/response.go b/typedapi/ingest/getiplocationdatabase/response.go similarity index 57% rename from typedapi/inference/inference/response.go rename to typedapi/ingest/getiplocationdatabase/response.go index 261ac57acc..56439de8c4 100644 --- a/typedapi/inference/inference/response.go +++ b/typedapi/ingest/getiplocationdatabase/response.go @@ -16,23 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -package inference +package getiplocationdatabase import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) -// Response holds the response body struct for the package inference +// Response holds the response body struct for the package getiplocationdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/inference/InferenceResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L24-L26 type Response struct { - Completion []types.CompletionResult `json:"completion,omitempty"` - Rerank []types.RankedDocument `json:"rerank,omitempty"` - SparseEmbedding []types.SparseEmbeddingResult `json:"sparse_embedding,omitempty"` - TextEmbedding []types.TextEmbeddingResult `json:"text_embedding,omitempty"` - TextEmbeddingBytes []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` + Databases []types.IpLocationDatabaseConfigurationMetadata `json:"databases"` } // NewResponse 
returns a Response diff --git a/typedapi/ingest/getpipeline/get_pipeline.go b/typedapi/ingest/getpipeline/get_pipeline.go index cc55e10340..894bc054c5 100644 --- a/typedapi/ingest/getpipeline/get_pipeline.go +++ b/typedapi/ingest/getpipeline/get_pipeline.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about one or more ingest pipelines. +// Get pipelines. +// +// Get information about one or more ingest pipelines. // This API returns a local reference of the pipeline. package getpipeline @@ -75,10 +77,12 @@ func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { } } -// Returns information about one or more ingest pipelines. +// Get pipelines. +// +// Get information about one or more ingest pipelines. // This API returns a local reference of the pipeline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline func New(tp elastictransport.Interface) *GetPipeline { r := &GetPipeline{ transport: tp, diff --git a/typedapi/ingest/getpipeline/response.go b/typedapi/ingest/getpipeline/response.go index 1ed91c4096..8beee52b20 100644 --- a/typedapi/ingest/getpipeline/response.go +++ b/typedapi/ingest/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L26 type Response map[string]types.IngestPipeline diff --git a/typedapi/ingest/processorgrok/processor_grok.go b/typedapi/ingest/processorgrok/processor_grok.go index 144ec670aa..594a126932 100644 --- a/typedapi/ingest/processorgrok/processor_grok.go +++ b/typedapi/ingest/processorgrok/processor_grok.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Extracts structured fields out of a single text field within a document. -// You choose which field to extract matched fields from, as well as the grok -// pattern you expect will match. +// Run a grok processor. +// Extract structured fields out of a single text field within a document. +// You must choose which field to extract matched fields from, as well as the +// grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. 
package processorgrok @@ -72,9 +73,10 @@ func NewProcessorGrokFunc(tp elastictransport.Interface) NewProcessorGrok { } } -// Extracts structured fields out of a single text field within a document. -// You choose which field to extract matched fields from, as well as the grok -// pattern you expect will match. +// Run a grok processor. +// Extract structured fields out of a single text field within a document. +// You must choose which field to extract matched fields from, as well as the +// grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. // diff --git a/typedapi/ingest/processorgrok/response.go b/typedapi/ingest/processorgrok/response.go index b7f6ea4f16..b0fa78d021 100644 --- a/typedapi/ingest/processorgrok/response.go +++ b/typedapi/ingest/processorgrok/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package processorgrok // Response holds the response body struct for the package processorgrok // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 type Response struct { Patterns map[string]string `json:"patterns"` } diff --git a/typedapi/ingest/putgeoipdatabase/put_geoip_database.go b/typedapi/ingest/putgeoipdatabase/put_geoip_database.go index 721e901e86..0e2e8f60b4 100644 --- a/typedapi/ingest/putgeoipdatabase/put_geoip_database.go +++ 
b/typedapi/ingest/putgeoipdatabase/put_geoip_database.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about one or more geoip database configurations. +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. package putgeoipdatabase import ( @@ -81,9 +83,11 @@ func NewPutGeoipDatabaseFunc(tp elastictransport.Interface) NewPutGeoipDatabase } } -// Returns information about one or more geoip database configurations. +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-geoip-database-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database func New(tp elastictransport.Interface) *PutGeoipDatabase { r := &PutGeoipDatabase{ transport: tp, @@ -91,8 +95,6 @@ func New(tp elastictransport.Interface) *PutGeoipDatabase { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -380,22 +382,31 @@ func (r *PutGeoipDatabase) Pretty(pretty bool) *PutGeoipDatabase { return r } -// Maxmind The configuration necessary to identify which IP geolocation provider to use +// The configuration necessary to identify which IP geolocation provider to use // to download the database, as well as any provider-specific configuration // necessary for such downloading. // At present, the only supported provider is maxmind, and the maxmind provider // requires that an account_id (string) is configured. 
// API name: maxmind -func (r *PutGeoipDatabase) Maxmind(maxmind *types.Maxmind) *PutGeoipDatabase { +func (r *PutGeoipDatabase) Maxmind(maxmind types.MaxmindVariant) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Maxmind = *maxmind + r.req.Maxmind = *maxmind.MaxmindCaster() return r } -// Name The provider-assigned name of the IP geolocation database to download. +// The provider-assigned name of the IP geolocation database to download. // API name: name func (r *PutGeoipDatabase) Name(name string) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = name return r diff --git a/typedapi/ingest/putgeoipdatabase/request.go b/typedapi/ingest/putgeoipdatabase/request.go index 30355a8c7f..3f6374b72a 100644 --- a/typedapi/ingest/putgeoipdatabase/request.go +++ b/typedapi/ingest/putgeoipdatabase/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putgeoipdatabase @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package putgeoipdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/put_geoip_database/PutGeoipDatabaseRequest.ts#L25-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_geoip_database/PutGeoipDatabaseRequest.ts#L25-L66 type Request struct { // Maxmind The configuration necessary to identify which IP geolocation provider to use diff --git a/typedapi/ingest/putgeoipdatabase/response.go b/typedapi/ingest/putgeoipdatabase/response.go index bf32e9bf5b..411547fc85 100644 --- a/typedapi/ingest/putgeoipdatabase/response.go +++ b/typedapi/ingest/putgeoipdatabase/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putgeoipdatabase // Response holds the response body struct for the package putgeoipdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/put_geoip_database/PutGeoipDatabaseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_geoip_database/PutGeoipDatabaseResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go b/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go new file mode 100644 index 0000000000..fbdab174cf --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create or update an IP geolocation database configuration. +package putiplocationdatabase + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutIpLocationDatabase type alias for index. +type NewPutIpLocationDatabase func(id string) *PutIpLocationDatabase + +// NewPutIpLocationDatabaseFunc returns a new instance of PutIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutIpLocationDatabaseFunc(tp elastictransport.Interface) NewPutIpLocationDatabase { + return func(id string) *PutIpLocationDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update an IP geolocation database configuration. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database +func New(tp elastictransport.Interface) *PutIpLocationDatabase { + r := &PutIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutIpLocationDatabase) Raw(raw io.Reader) *PutIpLocationDatabase { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *PutIpLocationDatabase) Request(req *Request) *PutIpLocationDatabase { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutIpLocationDatabase: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the 
http.Request through the provided transport and returns an http.Response. +func (r PutIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putiplocationdatabase.Response +func (r PutIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok 
:= r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutIpLocationDatabase headers map. +func (r *PutIpLocationDatabase) Header(key, value string) *PutIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id The database configuration identifier. +// API Name: id +func (r *PutIpLocationDatabase) _id(id string) *PutIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. +// API name: master_timeout +func (r *PutIpLocationDatabase) MasterTimeout(duration string) *PutIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. 
+// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response indicates that it was not completely +// acknowledged. +// A value of `-1` indicates that the request should never time out. +// API name: timeout +func (r *PutIpLocationDatabase) Timeout(duration string) *PutIpLocationDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutIpLocationDatabase) ErrorTrace(errortrace bool) *PutIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutIpLocationDatabase) FilterPath(filterpaths ...string) *PutIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutIpLocationDatabase) Human(human bool) *PutIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *PutIpLocationDatabase) Pretty(pretty bool) *PutIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: AdditionalDatabaseConfigurationProperty +// AdditionalDatabaseConfigurationProperty is a single key dictionary. +// It will replace the current value on each call. +func (r *PutIpLocationDatabase) AdditionalDatabaseConfigurationProperty(key string, value json.RawMessage) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + r.req.AdditionalDatabaseConfigurationProperty = tmp + return r +} + +// API name: ipinfo +func (r *PutIpLocationDatabase) Ipinfo(ipinfo types.IpinfoVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ipinfo = ipinfo.IpinfoCaster() + + return r +} + +// API name: maxmind +func (r *PutIpLocationDatabase) Maxmind(maxmind types.MaxmindVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Maxmind = maxmind.MaxmindCaster() + + return r +} + +// The provider-assigned name of the IP geolocation database to download. +// API name: name +func (r *PutIpLocationDatabase) Name(name string) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/typedapi/ingest/putiplocationdatabase/request.go b/typedapi/ingest/putiplocationdatabase/request.go new file mode 100644 index 0000000000..f5e3f569ec --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putiplocationdatabase + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_ip_location_database/PutIpLocationDatabaseRequest.ts#L25-L62 +type Request = types.DatabaseConfiguration + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewDatabaseConfiguration() + + return r +} diff --git a/typedapi/ingest/putiplocationdatabase/response.go b/typedapi/ingest/putiplocationdatabase/response.go new file mode 100644 index 0000000000..6d8be58f4e --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package putiplocationdatabase + +// Response holds the response body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_ip_location_database/PutIpLocationDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/putpipeline/put_pipeline.go b/typedapi/ingest/putpipeline/put_pipeline.go index da50b40e14..8621469ded 100644 --- a/typedapi/ingest/putpipeline/put_pipeline.go +++ b/typedapi/ingest/putpipeline/put_pipeline.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates an ingest pipeline. 
+// Create or update a pipeline. // Changes made using this API take effect immediately. package putpipeline @@ -82,7 +82,7 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { } } -// Creates or updates an ingest pipeline. +// Create or update a pipeline. // Changes made using this API take effect immediately. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *PutPipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,62 +385,95 @@ func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { return r } -// Deprecated Marks this ingest pipeline as deprecated. +// Marks this ingest pipeline as deprecated. // When a deprecated ingest pipeline is referenced as the default or final // pipeline when creating or updating a non-deprecated index template, // Elasticsearch will emit a deprecation warning. // API name: deprecated func (r *PutPipeline) Deprecated(deprecated bool) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// Description Description of the ingest pipeline. +// Description of the ingest pipeline. // API name: description func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Meta_ Optional metadata about the ingest pipeline. May have any contents. This map +// Optional metadata about the ingest pipeline. May have any contents. This map // is not automatically generated by Elasticsearch. 
// API name: _meta -func (r *PutPipeline) Meta_(metadata types.Metadata) *PutPipeline { - r.req.Meta_ = metadata +func (r *PutPipeline) Meta_(metadata types.MetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// OnFailure Processors to run immediately after a processor failure. Each processor +// Processors to run immediately after a processor failure. Each processor // supports a processor-level `on_failure` value. If a processor without an // `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as // a fallback. The processors in this parameter run sequentially in the order // specified. Elasticsearch will not attempt to run the pipeline's remaining // processors. // API name: on_failure -func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainer) *PutPipeline { - r.req.OnFailure = onfailures +func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range onfailures { + r.req.OnFailure = append(r.req.OnFailure, *v.ProcessorContainerCaster()) + + } return r } -// Processors Processors used to perform transformations on documents before indexing. +// Processors used to perform transformations on documents before indexing. // Processors run sequentially in the order specified. 
// API name: processors -func (r *PutPipeline) Processors(processors ...types.ProcessorContainer) *PutPipeline { - r.req.Processors = processors +func (r *PutPipeline) Processors(processors ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range processors { + + r.req.Processors = append(r.req.Processors, *v.ProcessorContainerCaster()) + } return r } -// Version Version number used by external systems to track ingest pipelines. This +// Version number used by external systems to track ingest pipelines. This // parameter is intended for external systems only. Elasticsearch does not use // or validate pipeline version numbers. // API name: version func (r *PutPipeline) Version(versionnumber int64) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/ingest/putpipeline/request.go b/typedapi/ingest/putpipeline/request.go index b74df14142..330bae7f56 100644 --- a/typedapi/ingest/putpipeline/request.go +++ b/typedapi/ingest/putpipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putpipeline @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L90 type Request struct { // Deprecated Marks this ingest pipeline as deprecated. diff --git a/typedapi/ingest/putpipeline/response.go b/typedapi/ingest/putpipeline/response.go index f3249572fd..d7282dfc36 100644 --- a/typedapi/ingest/putpipeline/response.go +++ b/typedapi/ingest/putpipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putpipeline // Response holds the response body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ingest/simulate/request.go b/typedapi/ingest/simulate/request.go index 2d16da2964..a1be41b9d0 100644 --- a/typedapi/ingest/simulate/request.go +++ b/typedapi/ingest/simulate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package simulate @@ -29,13 +29,13 @@ import ( // Request holds the request body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L72 type Request struct { // Docs Sample documents to test in the pipeline. Docs []types.Document `json:"docs"` - // Pipeline Pipeline to test. - // If you don’t specify the `pipeline` request path parameter, this parameter is + // Pipeline The pipeline to test. + // If you don't specify the `pipeline` request path parameter, this parameter is // required. // If you specify both this and the request path parameter, the API only uses // the request path parameter. diff --git a/typedapi/ingest/simulate/response.go b/typedapi/ingest/simulate/response.go index ffc42fe9d9..0303de8954 100644 --- a/typedapi/ingest/simulate/response.go +++ b/typedapi/ingest/simulate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package simulate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 type Response struct { Docs []types.SimulateDocumentResult `json:"docs"` } diff --git a/typedapi/ingest/simulate/simulate.go b/typedapi/ingest/simulate/simulate.go index 7e57611ac4..9b8c3a14a4 100644 --- a/typedapi/ingest/simulate/simulate.go +++ b/typedapi/ingest/simulate/simulate.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Executes an ingest pipeline against a set of provided documents. +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. package simulate import ( @@ -79,9 +83,13 @@ func NewSimulateFunc(tp elastictransport.Interface) NewSimulate { } } -// Executes an ingest pipeline against a set of provided documents. +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. 
+// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate func New(tp elastictransport.Interface) *Simulate { r := &Simulate{ transport: tp, @@ -89,8 +97,6 @@ func New(tp elastictransport.Interface) *Simulate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,8 +321,8 @@ func (r *Simulate) Header(key, value string) *Simulate { return r } -// Id Pipeline to test. -// If you don’t specify a `pipeline` in the request body, this parameter is +// Id The pipeline to test. +// If you don't specify a `pipeline` in the request body, this parameter is // required. // API Name: id func (r *Simulate) Id(id string) *Simulate { @@ -379,23 +385,34 @@ func (r *Simulate) Pretty(pretty bool) *Simulate { return r } -// Docs Sample documents to test in the pipeline. +// Sample documents to test in the pipeline. // API name: docs -func (r *Simulate) Docs(docs ...types.Document) *Simulate { - r.req.Docs = docs +func (r *Simulate) Docs(docs ...types.DocumentVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + r.req.Docs = append(r.req.Docs, *v.DocumentCaster()) + + } return r } -// Pipeline Pipeline to test. -// If you don’t specify the `pipeline` request path parameter, this parameter is +// The pipeline to test. +// If you don't specify the `pipeline` request path parameter, this parameter is // required. // If you specify both this and the request path parameter, the API only uses // the request path parameter. 
// API name: pipeline -func (r *Simulate) Pipeline(pipeline *types.IngestPipeline) *Simulate { +func (r *Simulate) Pipeline(pipeline types.IngestPipelineVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pipeline = pipeline + r.req.Pipeline = pipeline.IngestPipelineCaster() return r } diff --git a/typedapi/license/delete/delete.go b/typedapi/license/delete/delete.go index 3934ad5b27..473fc8ea07 100644 --- a/typedapi/license/delete/delete.go +++ b/typedapi/license/delete/delete.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes licensing information for the cluster +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. package delete import ( @@ -68,9 +73,14 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes licensing information for the cluster +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html +// If the operator privileges feature is enabled, only operator users can use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -274,6 +284,23 @@ func (r *Delete) Header(key, value string) *Delete { return r } +// MasterTimeout The period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/delete/response.go b/typedapi/license/delete/response.go index 4ada80cdf8..ceda5cd9c2 100644 --- a/typedapi/license/delete/response.go +++ b/typedapi/license/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/license/get/get.go b/typedapi/license/get/get.go index 969f8799f3..8692713688 100644 --- a/typedapi/license/get/get.go +++ b/typedapi/license/get/get.go @@ -16,13 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get license information. -// Returns information about your Elastic license, including its type, its -// status, when it was issued, and when it expires. -// For more information about the different types of licenses, refer to [Elastic -// Stack subscriptions](https://www.elastic.co/subscriptions). +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. package get import ( @@ -73,12 +78,17 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get license information. -// Returns information about your Elastic license, including its type, its -// status, when it was issued, and when it expires. -// For more information about the different types of licenses, refer to [Elastic -// Stack subscriptions](https://www.elastic.co/subscriptions). // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/license/get/response.go b/typedapi/license/get/response.go index c2e2d51c85..d2085c301f 100644 --- a/typedapi/license/get/response.go +++ b/typedapi/license/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/get/GetLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/get/GetLicenseResponse.ts#L22-L24 type Response struct { License types.LicenseInformation `json:"license"` } diff --git a/typedapi/license/getbasicstatus/get_basic_status.go b/typedapi/license/getbasicstatus/get_basic_status.go index 4b9ed48006..7b00300f68 100644 --- a/typedapi/license/getbasicstatus/get_basic_status.go +++ b/typedapi/license/getbasicstatus/get_basic_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves information about the status of the basic license. +// Get the basic license status. 
package getbasicstatus import ( @@ -68,9 +68,9 @@ func NewGetBasicStatusFunc(tp elastictransport.Interface) NewGetBasicStatus { } } -// Retrieves information about the status of the basic license. +// Get the basic license status. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status func New(tp elastictransport.Interface) *GetBasicStatus { r := &GetBasicStatus{ transport: tp, diff --git a/typedapi/license/getbasicstatus/response.go b/typedapi/license/getbasicstatus/response.go index 3054dfc378..5621438328 100644 --- a/typedapi/license/getbasicstatus/response.go +++ b/typedapi/license/getbasicstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getbasicstatus // Response holds the response body struct for the package getbasicstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartBasic bool `json:"eligible_to_start_basic"` } diff --git a/typedapi/license/gettrialstatus/get_trial_status.go b/typedapi/license/gettrialstatus/get_trial_status.go index 153e2967ec..ee8d5d9a3d 100644 --- a/typedapi/license/gettrialstatus/get_trial_status.go +++ b/typedapi/license/gettrialstatus/get_trial_status.go @@ -16,9 +16,9 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves information about the status of the trial license. +// Get the trial status. package gettrialstatus import ( @@ -68,9 +68,9 @@ func NewGetTrialStatusFunc(tp elastictransport.Interface) NewGetTrialStatus { } } -// Retrieves information about the status of the trial license. +// Get the trial status. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status func New(tp elastictransport.Interface) *GetTrialStatus { r := &GetTrialStatus{ transport: tp, diff --git a/typedapi/license/gettrialstatus/response.go b/typedapi/license/gettrialstatus/response.go index 89faabf424..e96340c245 100644 --- a/typedapi/license/gettrialstatus/response.go +++ b/typedapi/license/gettrialstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettrialstatus // Response holds the response body struct for the package gettrialstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartTrial bool `json:"eligible_to_start_trial"` } diff --git a/typedapi/license/post/post.go b/typedapi/license/post/post.go index b7bb86a1a9..05700b95b2 100644 --- a/typedapi/license/post/post.go +++ b/typedapi/license/post/post.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Updates the license for the cluster. +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. 
+// If the operator privileges feature is enabled, only operator users can use +// this API. package post import ( @@ -73,9 +87,23 @@ func NewPostFunc(tp elastictransport.Interface) NewPost { } } -// Updates the license for the cluster. +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post func New(tp elastictransport.Interface) *Post { r := &Post{ transport: tp, @@ -83,8 +111,6 @@ func New(tp elastictransport.Interface) *Post { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -298,6 +324,23 @@ func (r *Post) Acknowledge(acknowledge bool) *Post { return r } +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *Post) MasterTimeout(duration string) *Post { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. 
+// API name: timeout +func (r *Post) Timeout(duration string) *Post { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -343,17 +386,28 @@ func (r *Post) Pretty(pretty bool) *Post { } // API name: license -func (r *Post) License(license *types.License) *Post { +func (r *Post) License(license types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.License = license + r.req.License = license.LicenseCaster() return r } -// Licenses A sequence of one or more JSON documents containing the license information. +// A sequence of one or more JSON documents containing the license information. // API name: licenses -func (r *Post) Licenses(licenses ...types.License) *Post { - r.req.Licenses = licenses +func (r *Post) Licenses(licenses ...types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range licenses { + r.req.Licenses = append(r.req.Licenses, *v.LicenseCaster()) + + } return r } diff --git a/typedapi/license/post/request.go b/typedapi/license/post/request.go index ae599851c1..09f30a1841 100644 --- a/typedapi/license/post/request.go +++ b/typedapi/license/post/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package post @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/post/PostLicenseRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/post/PostLicenseRequest.ts#L24-L70 type Request struct { License *types.License `json:"license,omitempty"` // Licenses A sequence of one or more JSON documents containing the license information. diff --git a/typedapi/license/post/response.go b/typedapi/license/post/response.go index 50edfa7631..262068648a 100644 --- a/typedapi/license/post/response.go +++ b/typedapi/license/post/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package post @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/post/PostLicenseResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/post/PostLicenseResponse.ts#L23-L29 type Response struct { Acknowledge *types.Acknowledgement `json:"acknowledge,omitempty"` Acknowledged bool `json:"acknowledged"` diff --git a/typedapi/license/poststartbasic/post_start_basic.go b/typedapi/license/poststartbasic/post_start_basic.go index bf94548bdd..a704f68674 100644 --- a/typedapi/license/poststartbasic/post_start_basic.go +++ b/typedapi/license/poststartbasic/post_start_basic.go @@ -16,15 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// The start basic API enables you to initiate an indefinite basic license, -// which gives access to all the basic features. If the basic license does not -// support all of the features that are available with your current license, -// however, you are notified in the response. You must then re-submit the API -// request with the acknowledge parameter set to true. -// To check the status of your basic license, use the following API: [Get basic -// status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Start a basic license. 
+// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. package poststartbasic import ( @@ -74,15 +81,22 @@ func NewPostStartBasicFunc(tp elastictransport.Interface) NewPostStartBasic { } } -// The start basic API enables you to initiate an indefinite basic license, -// which gives access to all the basic features. If the basic license does not -// support all of the features that are available with your current license, -// however, you are notified in the response. You must then re-submit the API -// request with the acknowledge parameter set to true. -// To check the status of your basic license, use the following API: [Get basic -// status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +// Start a basic license. +// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic func New(tp elastictransport.Interface) *PostStartBasic { r := &PostStartBasic{ transport: tp, @@ -296,6 +310,23 @@ func (r *PostStartBasic) Acknowledge(acknowledge bool) *PostStartBasic { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartBasic) MasterTimeout(duration string) *PostStartBasic { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PostStartBasic) Timeout(duration string) *PostStartBasic { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/poststartbasic/response.go b/typedapi/license/poststartbasic/response.go index 9967d2aff4..8a744b047d 100644 --- a/typedapi/license/poststartbasic/response.go +++ b/typedapi/license/poststartbasic/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package poststartbasic @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package poststartbasic // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 type Response struct { Acknowledge map[string][]string `json:"acknowledge,omitempty"` Acknowledged bool `json:"acknowledged"` diff --git a/typedapi/license/poststarttrial/post_start_trial.go b/typedapi/license/poststarttrial/post_start_trial.go index 6d756ff180..c629706c13 100644 --- a/typedapi/license/poststarttrial/post_start_trial.go +++ b/typedapi/license/poststarttrial/post_start_trial.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// The start trial API enables you to start a 30-day trial, which gives access -// to all subscription features. +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. 
+// +// To check the status of your trial, use the get trial status API. package poststarttrial import ( @@ -69,10 +77,18 @@ func NewPostStartTrialFunc(tp elastictransport.Interface) NewPostStartTrial { } } -// The start trial API enables you to start a 30-day trial, which gives access -// to all subscription features. +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. +// +// To check the status of your trial, use the get trial status API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial func New(tp elastictransport.Interface) *PostStartTrial { r := &PostStartTrial{ transport: tp, @@ -293,6 +309,14 @@ func (r *PostStartTrial) TypeQueryString(typequerystring string) *PostStartTrial return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartTrial) MasterTimeout(duration string) *PostStartTrial { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/poststarttrial/response.go b/typedapi/license/poststarttrial/response.go index 01a9acf457..a34a7daebc 100644 --- a/typedapi/license/poststarttrial/response.go +++ b/typedapi/license/poststarttrial/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package poststarttrial @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package poststarttrial // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 type Response struct { Acknowledged bool `json:"acknowledged"` ErrorMessage *string `json:"error_message,omitempty"` diff --git a/typedapi/logstash/deletepipeline/delete_pipeline.go b/typedapi/logstash/deletepipeline/delete_pipeline.go index 4d95ccfe60..095c74b3e2 100644 --- a/typedapi/logstash/deletepipeline/delete_pipeline.go +++ b/typedapi/logstash/deletepipeline/delete_pipeline.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a pipeline used for Logstash Central Management. +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. +// If the request succeeds, you receive an empty response with an appropriate +// status code. package deletepipeline import ( @@ -74,9 +77,12 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { } } -// Deletes a pipeline used for Logstash Central Management. +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. 
+// If the request succeeds, you receive an empty response with an appropriate +// status code. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline func New(tp elastictransport.Interface) *DeletePipeline { r := &DeletePipeline{ transport: tp, @@ -239,7 +245,7 @@ func (r *DeletePipeline) Header(key, value string) *DeletePipeline { return r } -// Id Identifier for the pipeline. +// Id An identifier for the pipeline. // API Name: id func (r *DeletePipeline) _id(id string) *DeletePipeline { r.paramSet |= idMask diff --git a/typedapi/logstash/getpipeline/get_pipeline.go b/typedapi/logstash/getpipeline/get_pipeline.go index b786803dfa..f82a5767b0 100644 --- a/typedapi/logstash/getpipeline/get_pipeline.go +++ b/typedapi/logstash/getpipeline/get_pipeline.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves pipelines used for Logstash Central Management. +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. package getpipeline import ( @@ -74,9 +75,10 @@ func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { } } -// Retrieves pipelines used for Logstash Central Management. +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline func New(tp elastictransport.Interface) *GetPipeline { r := &GetPipeline{ transport: tp, @@ -295,7 +297,7 @@ func (r *GetPipeline) Header(key, value string) *GetPipeline { return r } -// Id Comma-separated list of pipeline identifiers. +// Id A comma-separated list of pipeline identifiers. // API Name: id func (r *GetPipeline) Id(id string) *GetPipeline { r.paramSet |= idMask diff --git a/typedapi/logstash/getpipeline/response.go b/typedapi/logstash/getpipeline/response.go index c47f52f7dc..07319722a9 100644 --- a/typedapi/logstash/getpipeline/response.go +++ b/typedapi/logstash/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27 type Response map[string]types.LogstashPipeline diff --git a/typedapi/logstash/putpipeline/put_pipeline.go b/typedapi/logstash/putpipeline/put_pipeline.go index c349cb3fbc..5182461430 100644 --- a/typedapi/logstash/putpipeline/put_pipeline.go +++ b/typedapi/logstash/putpipeline/put_pipeline.go @@ -16,9 +16,12 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a pipeline used for Logstash Central Management. +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. package putpipeline import ( @@ -81,9 +84,12 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { } } -// Creates or updates a pipeline used for Logstash Central Management. +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline func New(tp elastictransport.Interface) *PutPipeline { r := &PutPipeline{ transport: tp, @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *PutPipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -252,7 +256,7 @@ func (r *PutPipeline) Header(key, value string) *PutPipeline { return r } -// Id Identifier for the pipeline. +// Id An identifier for the pipeline. // API Name: id func (r *PutPipeline) _id(id string) *PutPipeline { r.paramSet |= idMask @@ -305,58 +309,82 @@ func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { return r } -// Description Description of the pipeline. +// A description of the pipeline. // This description is not used by Elasticsearch or Logstash. 
// API name: description func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = description return r } -// LastModified Date the pipeline was last updated. -// Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. +// The date the pipeline was last updated. +// It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. // API name: last_modified -func (r *PutPipeline) LastModified(datetime types.DateTime) *PutPipeline { - r.req.LastModified = datetime +func (r *PutPipeline) LastModified(datetime types.DateTimeVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastModified = *datetime.DateTimeCaster() return r } -// Pipeline Configuration for the pipeline. +// The configuration for the pipeline. // API name: pipeline func (r *PutPipeline) Pipeline(pipeline string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Pipeline = pipeline return r } -// PipelineMetadata Optional metadata about the pipeline. -// May have any contents. +// Optional metadata about the pipeline, which can have any contents. // This metadata is not generated or used by Elasticsearch or Logstash. // API name: pipeline_metadata -func (r *PutPipeline) PipelineMetadata(pipelinemetadata *types.PipelineMetadata) *PutPipeline { +func (r *PutPipeline) PipelineMetadata(pipelinemetadata types.PipelineMetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PipelineMetadata = *pipelinemetadata + r.req.PipelineMetadata = *pipelinemetadata.PipelineMetadataCaster() return r } -// PipelineSettings Settings for the pipeline. -// Supports only flat keys in dot notation. 
+// Settings for the pipeline. +// It supports only flat keys in dot notation. // API name: pipeline_settings -func (r *PutPipeline) PipelineSettings(pipelinesettings *types.PipelineSettings) *PutPipeline { +func (r *PutPipeline) PipelineSettings(pipelinesettings types.PipelineSettingsVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PipelineSettings = *pipelinesettings + r.req.PipelineSettings = *pipelinesettings.PipelineSettingsCaster() return r } -// Username User who last updated the pipeline. +// The user who last updated the pipeline. // API name: username func (r *PutPipeline) Username(username string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Username = username diff --git a/typedapi/logstash/putpipeline/request.go b/typedapi/logstash/putpipeline/request.go index 1a26c566f6..18ae984f4a 100644 --- a/typedapi/logstash/putpipeline/request.go +++ b/typedapi/logstash/putpipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putpipeline @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L51 type Request = types.LogstashPipeline // NewRequest returns a Request diff --git a/typedapi/migration/deprecations/deprecations.go b/typedapi/migration/deprecations/deprecations.go index ad0fa5792d..b89c47af36 100644 --- a/typedapi/migration/deprecations/deprecations.go +++ b/typedapi/migration/deprecations/deprecations.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves information about different cluster, node, and index level settings -// that use deprecated features that will be removed or changed in the next -// major version. +// Get deprecation information. +// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. 
package deprecations import ( @@ -76,11 +80,15 @@ func NewDeprecationsFunc(tp elastictransport.Interface) NewDeprecations { } } -// Retrieves information about different cluster, node, and index level settings -// that use deprecated features that will be removed or changed in the next -// major version. +// Get deprecation information. +// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations func New(tp elastictransport.Interface) *Deprecations { r := &Deprecations{ transport: tp, diff --git a/typedapi/migration/deprecations/response.go b/typedapi/migration/deprecations/response.go index 48580fdf69..b7e7899d1f 100644 --- a/typedapi/migration/deprecations/response.go +++ b/typedapi/migration/deprecations/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deprecations @@ -26,18 +26,39 @@ import ( // Response holds the response body struct for the package deprecations // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L54 type Response struct { + + // ClusterSettings Cluster-level deprecation warnings. ClusterSettings []types.Deprecation `json:"cluster_settings"` - IndexSettings map[string][]types.Deprecation `json:"index_settings"` - MlSettings []types.Deprecation `json:"ml_settings"` - NodeSettings []types.Deprecation `json:"node_settings"` + DataStreams map[string][]types.Deprecation `json:"data_streams"` + // IlmPolicies ILM policy warnings are sectioned off per policy. + IlmPolicies map[string][]types.Deprecation `json:"ilm_policies"` + // IndexSettings Index warnings are sectioned off per index and can be filtered using an + // index-pattern in the query. + // This section includes warnings for the backing indices of data streams + // specified in the request path. + IndexSettings map[string][]types.Deprecation `json:"index_settings"` + // MlSettings Machine learning-related deprecation warnings. + MlSettings []types.Deprecation `json:"ml_settings"` + // NodeSettings Node-level deprecation warnings. + // Since only a subset of your nodes might incorporate these settings, it is + // important to read the details section for more information about which nodes + // are affected. 
+ NodeSettings []types.Deprecation `json:"node_settings"` + // Templates Template warnings are sectioned off per template and include deprecations for + // both component templates and + // index templates. + Templates map[string][]types.Deprecation `json:"templates"` } // NewResponse returns a Response func NewResponse() *Response { r := &Response{ + DataStreams: make(map[string][]types.Deprecation, 0), + IlmPolicies: make(map[string][]types.Deprecation, 0), IndexSettings: make(map[string][]types.Deprecation, 0), + Templates: make(map[string][]types.Deprecation, 0), } return r } diff --git a/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go b/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go index 58117f2cef..161ffbb77e 100644 --- a/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go +++ b/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Find out whether system features need to be upgraded or not +// Get feature migration information. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. package getfeatureupgradestatus import ( @@ -68,9 +75,16 @@ func NewGetFeatureUpgradeStatusFunc(tp elastictransport.Interface) NewGetFeature } } -// Find out whether system features need to be upgraded or not +// Get feature migration information. 
+// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status func New(tp elastictransport.Interface) *GetFeatureUpgradeStatus { r := &GetFeatureUpgradeStatus{ transport: tp, diff --git a/typedapi/migration/getfeatureupgradestatus/response.go b/typedapi/migration/getfeatureupgradestatus/response.go index 39d909356c..844f8f35db 100644 --- a/typedapi/migration/getfeatureupgradestatus/response.go +++ b/typedapi/migration/getfeatureupgradestatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getfeatureupgradestatus @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package getfeatureupgradestatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 type Response struct { Features []types.GetMigrationFeature `json:"features"` MigrationStatus migrationstatus.MigrationStatus `json:"migration_status"` diff --git a/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go b/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go index 13596fcad8..dc9ca3e92d 100644 --- a/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go +++ b/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go @@ -16,9 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Begin upgrades for system features +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. 
We +// strongly recommend you use the Upgrade Assistant. package postfeatureupgrade import ( @@ -68,9 +77,18 @@ func NewPostFeatureUpgradeFunc(tp elastictransport.Interface) NewPostFeatureUpgr } } -// Begin upgrades for system features +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. We +// strongly recommend you use the Upgrade Assistant. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status func New(tp elastictransport.Interface) *PostFeatureUpgrade { r := &PostFeatureUpgrade{ transport: tp, diff --git a/typedapi/migration/postfeatureupgrade/response.go b/typedapi/migration/postfeatureupgrade/response.go index f5b0c90cef..5117dd665a 100644 --- a/typedapi/migration/postfeatureupgrade/response.go +++ b/typedapi/migration/postfeatureupgrade/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package postfeatureupgrade @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postfeatureupgrade // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 type Response struct { Accepted bool `json:"accepted"` Features []types.PostMigrationFeature `json:"features"` diff --git a/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go b/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go index 8f211bad92..dfdbd76b47 100644 --- a/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go +++ b/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear trained model deployment cache. +// // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. // As requests are handled by each allocated node, their responses may be cached @@ -82,13 +83,14 @@ func NewClearTrainedModelDeploymentCacheFunc(tp elastictransport.Interface) NewC } // Clear trained model deployment cache. 
+// // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. // As requests are handled by each allocated node, their responses may be cached // on that individual node. // Calling this API clears the caches without restarting the deployment. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-trained-model-deployment-cache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache func New(tp elastictransport.Interface) *ClearTrainedModelDeploymentCache { r := &ClearTrainedModelDeploymentCache{ transport: tp, diff --git a/typedapi/ml/cleartrainedmodeldeploymentcache/response.go b/typedapi/ml/cleartrainedmodeldeploymentcache/response.go index 28f60257ab..6ab5169804 100644 --- a/typedapi/ml/cleartrainedmodeldeploymentcache/response.go +++ b/typedapi/ml/cleartrainedmodeldeploymentcache/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package cleartrainedmodeldeploymentcache // Response holds the response body struct for the package cleartrainedmodeldeploymentcache // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 type Response struct { Cleared bool `json:"cleared"` } diff --git a/typedapi/ml/closejob/close_job.go b/typedapi/ml/closejob/close_job.go index be76ceecfa..222c3b91f9 100644 --- a/typedapi/ml/closejob/close_job.go +++ b/typedapi/ml/closejob/close_job.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Close anomaly detection jobs. +// // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. @@ -98,6 +99,7 @@ func NewCloseJobFunc(tp elastictransport.Interface) NewCloseJob { } // Close anomaly detection jobs. +// // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. 
@@ -115,7 +117,7 @@ func NewCloseJobFunc(tp elastictransport.Interface) NewCloseJob { // When a datafeed that has a specified end date stops, it automatically closes // its associated job. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job func New(tp elastictransport.Interface) *CloseJob { r := &CloseJob{ transport: tp, @@ -123,8 +125,6 @@ func New(tp elastictransport.Interface) *CloseJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -397,26 +397,41 @@ func (r *CloseJob) Pretty(pretty bool) *CloseJob { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *CloseJob) AllowNoMatch(allownomatch bool) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// Force Refer to the descriptiion for the `force` query parameter. +// Refer to the description for the `force` query parameter. // API name: force func (r *CloseJob) Force(force bool) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Force = &force return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter.
// API name: timeout -func (r *CloseJob) Timeout(duration types.Duration) *CloseJob { - r.req.Timeout = duration +func (r *CloseJob) Timeout(duration types.DurationVariant) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/closejob/request.go b/typedapi/ml/closejob/request.go index a29bec0e9a..9eab7a4148 100644 --- a/typedapi/ml/closejob/request.go +++ b/typedapi/ml/closejob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package closejob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/close_job/MlCloseJobRequest.ts#L24-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/close_job/MlCloseJobRequest.ts#L24-L85 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. diff --git a/typedapi/ml/closejob/response.go b/typedapi/ml/closejob/response.go index c17218217f..563e56ad31 100644 --- a/typedapi/ml/closejob/response.go +++ b/typedapi/ml/closejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package closejob // Response holds the response body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 type Response struct { Closed bool `json:"closed"` } diff --git a/typedapi/ml/deletecalendar/delete_calendar.go b/typedapi/ml/deletecalendar/delete_calendar.go index 20e110b7f6..7d1ba31286 100644 --- a/typedapi/ml/deletecalendar/delete_calendar.go +++ b/typedapi/ml/deletecalendar/delete_calendar.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a calendar. -// Removes all scheduled events from a calendar, then deletes it. +// +// Remove all scheduled events from a calendar, then delete it. package deletecalendar import ( @@ -78,9 +79,10 @@ func NewDeleteCalendarFunc(tp elastictransport.Interface) NewDeleteCalendar { } // Delete a calendar. -// Removes all scheduled events from a calendar, then deletes it. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html +// Remove all scheduled events from a calendar, then delete it. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar func New(tp elastictransport.Interface) *DeleteCalendar { r := &DeleteCalendar{ transport: tp, diff --git a/typedapi/ml/deletecalendar/response.go b/typedapi/ml/deletecalendar/response.go index 6544b8124f..f6896d0c71 100644 --- a/typedapi/ml/deletecalendar/response.go +++ b/typedapi/ml/deletecalendar/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletecalendar // Response holds the response body struct for the package deletecalendar // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletecalendarevent/delete_calendar_event.go b/typedapi/ml/deletecalendarevent/delete_calendar_event.go index 93177575c0..eed1e59663 100644 --- a/typedapi/ml/deletecalendarevent/delete_calendar_event.go +++ b/typedapi/ml/deletecalendarevent/delete_calendar_event.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete events from a calendar. 
package deletecalendarevent @@ -83,7 +83,7 @@ func NewDeleteCalendarEventFunc(tp elastictransport.Interface) NewDeleteCalendar // Delete events from a calendar. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event func New(tp elastictransport.Interface) *DeleteCalendarEvent { r := &DeleteCalendarEvent{ transport: tp, diff --git a/typedapi/ml/deletecalendarevent/response.go b/typedapi/ml/deletecalendarevent/response.go index 8458843ba5..929730073c 100644 --- a/typedapi/ml/deletecalendarevent/response.go +++ b/typedapi/ml/deletecalendarevent/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletecalendarevent // Response holds the response body struct for the package deletecalendarevent // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletecalendarjob/delete_calendar_job.go b/typedapi/ml/deletecalendarjob/delete_calendar_job.go index 1c8e1e7bd7..b37186e388 100644 --- a/typedapi/ml/deletecalendarjob/delete_calendar_job.go +++ b/typedapi/ml/deletecalendarjob/delete_calendar_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete anomaly jobs from a calendar. package deletecalendarjob @@ -83,7 +83,7 @@ func NewDeleteCalendarJobFunc(tp elastictransport.Interface) NewDeleteCalendarJo // Delete anomaly jobs from a calendar. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job func New(tp elastictransport.Interface) *DeleteCalendarJob { r := &DeleteCalendarJob{ transport: tp, diff --git a/typedapi/ml/deletecalendarjob/response.go b/typedapi/ml/deletecalendarjob/response.go index 1e93e73dac..92d874aab0 100644 --- a/typedapi/ml/deletecalendarjob/response.go +++ b/typedapi/ml/deletecalendarjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletecalendarjob @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package deletecalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. 
diff --git a/typedapi/ml/deletedatafeed/delete_datafeed.go b/typedapi/ml/deletedatafeed/delete_datafeed.go index 5edb5715d5..ad17e7afd4 100644 --- a/typedapi/ml/deletedatafeed/delete_datafeed.go +++ b/typedapi/ml/deletedatafeed/delete_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a datafeed. package deletedatafeed @@ -78,7 +78,7 @@ func NewDeleteDatafeedFunc(tp elastictransport.Interface) NewDeleteDatafeed { // Delete a datafeed. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed func New(tp elastictransport.Interface) *DeleteDatafeed { r := &DeleteDatafeed{ transport: tp, diff --git a/typedapi/ml/deletedatafeed/response.go b/typedapi/ml/deletedatafeed/response.go index ef4a322451..a506e7fefe 100644 --- a/typedapi/ml/deletedatafeed/response.go +++ b/typedapi/ml/deletedatafeed/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletedatafeed // Response holds the response body struct for the package deletedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go b/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go index f531a4ec64..ae6fe28219 100644 --- a/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go +++ b/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a data frame analytics job. package deletedataframeanalytics @@ -78,7 +78,7 @@ func NewDeleteDataFrameAnalyticsFunc(tp elastictransport.Interface) NewDeleteDat // Delete a data frame analytics job. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics func New(tp elastictransport.Interface) *DeleteDataFrameAnalytics { r := &DeleteDataFrameAnalytics{ transport: tp, diff --git a/typedapi/ml/deletedataframeanalytics/response.go b/typedapi/ml/deletedataframeanalytics/response.go index 6d13e1effe..47a422f742 100644 --- a/typedapi/ml/deletedataframeanalytics/response.go +++ b/typedapi/ml/deletedataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletedataframeanalytics // Response holds the response body struct for the package deletedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deleteexpireddata/delete_expired_data.go b/typedapi/ml/deleteexpireddata/delete_expired_data.go index 428f26dd8f..3dbfc28e94 100644 --- a/typedapi/ml/deleteexpireddata/delete_expired_data.go +++ b/typedapi/ml/deleteexpireddata/delete_expired_data.go @@ -16,17 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete expired ML data. -// Deletes all job results, model snapshots and forecast data that have exceeded +// +// Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection -// jobs by using _all, by specifying * as the , or by omitting the -// . +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. package deleteexpireddata import ( @@ -88,16 +89,17 @@ func NewDeleteExpiredDataFunc(tp elastictransport.Interface) NewDeleteExpiredDat } // Delete expired ML data. -// Deletes all job results, model snapshots and forecast data that have exceeded +// +// Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection -// jobs by using _all, by specifying * as the , or by omitting the -// . +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data func New(tp elastictransport.Interface) *DeleteExpiredData { r := &DeleteExpiredData{ transport: tp, @@ -105,8 +107,6 @@ func New(tp elastictransport.Interface) *DeleteExpiredData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -381,20 +381,29 @@ func (r *DeleteExpiredData) Pretty(pretty bool) *DeleteExpiredData { return r } -// RequestsPerSecond The desired requests per second for the deletion processes. The default +// The desired requests per second for the deletion processes. The default // behavior is no throttling. // API name: requests_per_second func (r *DeleteExpiredData) RequestsPerSecond(requestspersecond float32) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RequestsPerSecond = &requestspersecond return r } -// Timeout How long can the underlying delete processes run until they are canceled. +// How long can the underlying delete processes run until they are canceled. // API name: timeout -func (r *DeleteExpiredData) Timeout(duration types.Duration) *DeleteExpiredData { - r.req.Timeout = duration +func (r *DeleteExpiredData) Timeout(duration types.DurationVariant) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/deleteexpireddata/request.go b/typedapi/ml/deleteexpireddata/request.go index c129de0756..31ad7e5aa5 100644 --- a/typedapi/ml/deleteexpireddata/request.go +++ b/typedapi/ml/deleteexpireddata/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteexpireddata @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L85 type Request struct { // RequestsPerSecond The desired requests per second for the deletion processes. The default diff --git a/typedapi/ml/deleteexpireddata/response.go b/typedapi/ml/deleteexpireddata/response.go index 26f3cefeb4..b8946c0201 100644 --- a/typedapi/ml/deleteexpireddata/response.go +++ b/typedapi/ml/deleteexpireddata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteexpireddata // Response holds the response body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 type Response struct { Deleted bool `json:"deleted"` } diff --git a/typedapi/ml/deletefilter/delete_filter.go b/typedapi/ml/deletefilter/delete_filter.go index 6ef923c8ab..4edb8c2d64 100644 --- a/typedapi/ml/deletefilter/delete_filter.go +++ b/typedapi/ml/deletefilter/delete_filter.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a filter. +// // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. package deletefilter @@ -79,10 +80,11 @@ func NewDeleteFilterFunc(tp elastictransport.Interface) NewDeleteFilter { } // Delete a filter. +// // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter func New(tp elastictransport.Interface) *DeleteFilter { r := &DeleteFilter{ transport: tp, diff --git a/typedapi/ml/deletefilter/response.go b/typedapi/ml/deletefilter/response.go index eff110cc0d..fdf83efe30 100644 --- a/typedapi/ml/deletefilter/response.go +++ b/typedapi/ml/deletefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletefilter // Response holds the response body struct for the package deletefilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deleteforecast/delete_forecast.go b/typedapi/ml/deleteforecast/delete_forecast.go index a0112d4638..9b5099db96 100644 --- a/typedapi/ml/deleteforecast/delete_forecast.go +++ b/typedapi/ml/deleteforecast/delete_forecast.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete forecasts from a job. 
+// // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. The delete forecast API enables you to delete one or more @@ -84,12 +85,13 @@ func NewDeleteForecastFunc(tp elastictransport.Interface) NewDeleteForecast { } // Delete forecasts from a job. +// // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. The delete forecast API enables you to delete one or more // forecasts before they expire. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast func New(tp elastictransport.Interface) *DeleteForecast { r := &DeleteForecast{ transport: tp, diff --git a/typedapi/ml/deleteforecast/response.go b/typedapi/ml/deleteforecast/response.go index 2ab9c8f8f5..e141041c82 100644 --- a/typedapi/ml/deleteforecast/response.go +++ b/typedapi/ml/deleteforecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteforecast // Response holds the response body struct for the package deleteforecast // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/deletejob/delete_job.go b/typedapi/ml/deletejob/delete_job.go index 09a1a63569..21622bd2c4 100644 --- a/typedapi/ml/deletejob/delete_job.go +++ b/typedapi/ml/deletejob/delete_job.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an anomaly detection job. +// // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request @@ -83,6 +84,7 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { } // Delete an anomaly detection job. +// // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request @@ -90,7 +92,7 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { // the delete datafeed API with the same timeout and force parameters as the // delete job request. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job func New(tp elastictransport.Interface) *DeleteJob { r := &DeleteJob{ transport: tp, diff --git a/typedapi/ml/deletejob/response.go b/typedapi/ml/deletejob/response.go index d3bc82a05b..e71b236d54 100644 --- a/typedapi/ml/deletejob/response.go +++ b/typedapi/ml/deletejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletejob // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go b/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go index 82c8acbdc7..0de5d47b61 100644 --- a/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go +++ b/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a model snapshot. +// // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. @@ -85,11 +86,12 @@ func NewDeleteModelSnapshotFunc(tp elastictransport.Interface) NewDeleteModelSna } // Delete a model snapshot. +// // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot func New(tp elastictransport.Interface) *DeleteModelSnapshot { r := &DeleteModelSnapshot{ transport: tp, diff --git a/typedapi/ml/deletemodelsnapshot/response.go b/typedapi/ml/deletemodelsnapshot/response.go index 264acb9c5f..3127065cc4 100644 --- a/typedapi/ml/deletemodelsnapshot/response.go +++ b/typedapi/ml/deletemodelsnapshot/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletemodelsnapshot // Response holds the response body struct for the package deletemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletetrainedmodel/delete_trained_model.go b/typedapi/ml/deletetrainedmodel/delete_trained_model.go index 1f146358d3..10ac16063d 100644 --- a/typedapi/ml/deletetrainedmodel/delete_trained_model.go +++ b/typedapi/ml/deletetrainedmodel/delete_trained_model.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete an unreferenced trained model. +// // The request deletes a trained inference model that is not referenced by an // ingest pipeline. package deletetrainedmodel @@ -79,10 +80,11 @@ func NewDeleteTrainedModelFunc(tp elastictransport.Interface) NewDeleteTrainedMo } // Delete an unreferenced trained model. +// // The request deletes a trained inference model that is not referenced by an // ingest pipeline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model func New(tp elastictransport.Interface) *DeleteTrainedModel { r := &DeleteTrainedModel{ transport: tp, @@ -312,6 +314,15 @@ func (r *DeleteTrainedModel) Force(force bool) *DeleteTrainedModel { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteTrainedModel) Timeout(duration string) *DeleteTrainedModel { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ml/deletetrainedmodel/response.go b/typedapi/ml/deletetrainedmodel/response.go index 106897c9cc..41847fe5c1 100644 --- a/typedapi/ml/deletetrainedmodel/response.go +++ b/typedapi/ml/deletetrainedmodel/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletetrainedmodel // Response holds the response body struct for the package deletetrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go b/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go index 67bb9cc267..ed0e33bbb3 100644 --- a/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go +++ b/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a trained model alias. +// // This API deletes an existing model alias that refers to a trained model. If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. @@ -85,11 +86,12 @@ func NewDeleteTrainedModelAliasFunc(tp elastictransport.Interface) NewDeleteTrai } // Delete a trained model alias. +// // This API deletes an existing model alias that refers to a trained model. 
If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias func New(tp elastictransport.Interface) *DeleteTrainedModelAlias { r := &DeleteTrainedModelAlias{ transport: tp, diff --git a/typedapi/ml/deletetrainedmodelalias/response.go b/typedapi/ml/deletetrainedmodelalias/response.go index e8b0866b8b..1704bee4f4 100644 --- a/typedapi/ml/deletetrainedmodelalias/response.go +++ b/typedapi/ml/deletetrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletetrainedmodelalias // Response holds the response body struct for the package deletetrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/estimatemodelmemory/estimate_model_memory.go b/typedapi/ml/estimatemodelmemory/estimate_model_memory.go index d11123c124..4760f4d515 100644 --- a/typedapi/ml/estimatemodelmemory/estimate_model_memory.go +++ b/typedapi/ml/estimatemodelmemory/estimate_model_memory.go @@ -16,11 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Estimate job model memory usage. -// Makes an estimation of the memory usage for an anomaly detection job model. -// It is based on analysis configuration details for the job and cardinality +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality // estimates for the fields it references. package estimatemodelmemory @@ -77,11 +79,13 @@ func NewEstimateModelMemoryFunc(tp elastictransport.Interface) NewEstimateModelM } // Estimate job model memory usage. -// Makes an estimation of the memory usage for an anomaly detection job model. -// It is based on analysis configuration details for the job and cardinality +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality // estimates for the fields it references. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory func New(tp elastictransport.Interface) *EstimateModelMemory { r := &EstimateModelMemory{ transport: tp, @@ -89,8 +93,6 @@ func New(tp elastictransport.Interface) *EstimateModelMemory { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,30 +346,55 @@ func (r *EstimateModelMemory) Pretty(pretty bool) *EstimateModelMemory { return r } -// AnalysisConfig For a list of the properties that you can specify in the +// For a list of the properties that you can specify in the // `analysis_config` component of the body of this API. // API name: analysis_config -func (r *EstimateModelMemory) AnalysisConfig(analysisconfig *types.AnalysisConfig) *EstimateModelMemory { +func (r *EstimateModelMemory) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = analysisconfig + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() return r } -// MaxBucketCardinality Estimates of the highest cardinality in a single bucket that is observed +// Estimates of the highest cardinality in a single bucket that is observed // for influencer fields over the time period that the job analyzes data. // To produce a good answer, values must be provided for all influencer // fields. Providing values for fields that are not listed as `influencers` // has no effect on the estimation. 
// API name: max_bucket_cardinality func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[string]int64) *EstimateModelMemory { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxBucketCardinality = maxbucketcardinality + return r +} + +func (r *EstimateModelMemory) AddMaxBucketCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.MaxBucketCardinality == nil { + r.req.MaxBucketCardinality = make(map[string]int64) + } else { + tmp = r.req.MaxBucketCardinality + } + + tmp[key] = value + r.req.MaxBucketCardinality = tmp return r } -// OverallCardinality Estimates of the cardinality that is observed for fields over the whole +// Estimates of the cardinality that is observed for fields over the whole // time period that the job analyzes data. To produce a good answer, values // must be provided for fields referenced in the `by_field_name`, // `over_field_name` and `partition_field_name` of any detectors. Providing @@ -376,8 +403,29 @@ func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[stri // `over_field_name` or `partition_field_name`. 
// API name: overall_cardinality func (r *EstimateModelMemory) OverallCardinality(overallcardinality map[string]int64) *EstimateModelMemory { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.OverallCardinality = overallcardinality + return r +} + +func (r *EstimateModelMemory) AddOverallCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.OverallCardinality == nil { + r.req.OverallCardinality = make(map[string]int64) + } else { + tmp = r.req.OverallCardinality + } + + tmp[key] = value + r.req.OverallCardinality = tmp return r } diff --git a/typedapi/ml/estimatemodelmemory/request.go b/typedapi/ml/estimatemodelmemory/request.go index bf8ae499a0..5f04be5e67 100644 --- a/typedapi/ml/estimatemodelmemory/request.go +++ b/typedapi/ml/estimatemodelmemory/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package estimatemodelmemory @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L71 type Request struct { // AnalysisConfig For a list of the properties that you can specify in the diff --git a/typedapi/ml/estimatemodelmemory/response.go b/typedapi/ml/estimatemodelmemory/response.go index 037e80f47c..9803baf7eb 100644 --- a/typedapi/ml/estimatemodelmemory/response.go +++ b/typedapi/ml/estimatemodelmemory/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package estimatemodelmemory // Response holds the response body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 type Response struct { ModelMemoryEstimate string `json:"model_memory_estimate"` } diff --git a/typedapi/ml/evaluatedataframe/evaluate_data_frame.go b/typedapi/ml/evaluatedataframe/evaluate_data_frame.go index 32df58759f..45477a856a 100644 --- a/typedapi/ml/evaluatedataframe/evaluate_data_frame.go +++ b/typedapi/ml/evaluatedataframe/evaluate_data_frame.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Evaluate data frame analytics. +// // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. Evaluation requires both a ground truth @@ -78,12 +79,13 @@ func NewEvaluateDataFrameFunc(tp elastictransport.Interface) NewEvaluateDataFram } // Evaluate data frame analytics. +// // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. 
Evaluation requires both a ground truth // field and an analytics result field to be present. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame func New(tp elastictransport.Interface) *EvaluateDataFrame { r := &EvaluateDataFrame{ transport: tp, @@ -91,8 +93,6 @@ func New(tp elastictransport.Interface) *EvaluateDataFrame { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -346,28 +346,41 @@ func (r *EvaluateDataFrame) Pretty(pretty bool) *EvaluateDataFrame { return r } -// Evaluation Defines the type of evaluation you want to perform. +// Defines the type of evaluation you want to perform. // API name: evaluation -func (r *EvaluateDataFrame) Evaluation(evaluation *types.DataframeEvaluationContainer) *EvaluateDataFrame { +func (r *EvaluateDataFrame) Evaluation(evaluation types.DataframeEvaluationContainerVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Evaluation = *evaluation + r.req.Evaluation = *evaluation.DataframeEvaluationContainerCaster() return r } -// Index Defines the `index` in which the evaluation will be performed. +// Defines the `index` in which the evaluation will be performed. // API name: index func (r *EvaluateDataFrame) Index(indexname string) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = indexname return r } -// Query A query clause that retrieves a subset of data from the source index. +// A query clause that retrieves a subset of data from the source index. 
// API name: query -func (r *EvaluateDataFrame) Query(query *types.Query) *EvaluateDataFrame { +func (r *EvaluateDataFrame) Query(query types.QueryVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/ml/evaluatedataframe/request.go b/typedapi/ml/evaluatedataframe/request.go index 91df1fe937..88ad652f2b 100644 --- a/typedapi/ml/evaluatedataframe/request.go +++ b/typedapi/ml/evaluatedataframe/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package evaluatedataframe @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L61 type Request struct { // Evaluation Defines the type of evaluation you want to perform. diff --git a/typedapi/ml/evaluatedataframe/response.go b/typedapi/ml/evaluatedataframe/response.go index f0c60cee8a..b6d295cbcf 100644 --- a/typedapi/ml/evaluatedataframe/response.go +++ b/typedapi/ml/evaluatedataframe/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package evaluatedataframe @@ -26,11 +26,19 @@ import ( // Response holds the response body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L44 type Response struct { - Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` + + // Classification Evaluation results for a classification analysis. + // It outputs a prediction that identifies to which of the classes each document + // belongs. + Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` + // OutlierDetection Evaluation results for an outlier detection analysis. + // It outputs the probability that each document is an outlier. OutlierDetection *types.DataframeOutlierDetectionSummary `json:"outlier_detection,omitempty"` - Regression *types.DataframeRegressionSummary `json:"regression,omitempty"` + // Regression Evaluation results for a regression analysis which outputs a prediction of + // values. + Regression *types.DataframeRegressionSummary `json:"regression,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go b/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go index 5a8c378741..e3fe04fd36 100644 --- a/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go +++ b/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Explain data frame analytics config. +// // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. The following // explanations are provided: @@ -88,6 +89,7 @@ func NewExplainDataFrameAnalyticsFunc(tp elastictransport.Interface) NewExplainD } // Explain data frame analytics config. +// // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. The following // explanations are provided: @@ -97,7 +99,7 @@ func NewExplainDataFrameAnalyticsFunc(tp elastictransport.Interface) NewExplainD // If you have object fields or fields that are excluded via source filtering, // they are not included in the explanation. // -// http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { r := &ExplainDataFrameAnalytics{ transport: tp, @@ -105,8 +107,6 @@ func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -390,88 +390,122 @@ func (r *ExplainDataFrameAnalytics) Pretty(pretty bool) *ExplainDataFrameAnalyti return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. 
// API name: allow_lazy_start func (r *ExplainDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Analysis The analysis configuration, which contains the information necessary to +// The analysis configuration, which contains the information necessary to // perform one of the following types of analysis: classification, outlier // detection, or regression. // API name: analysis -func (r *ExplainDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = analysis + r.req.Analysis = analysis.DataframeAnalysisContainerCaster() return r } -// AnalyzedFields Specify includes and/or excludes patterns to select which fields will be +// Specify includes and/or excludes patterns to select which fields will be // included in the analysis. The patterns specified in excludes are applied // last, therefore excludes takes precedence. In other words, if the same // field is specified in both includes and excludes, then the field will not // be included in the analysis. 
// API name: analyzed_fields -func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalyzedFields = analyzedfields + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *ExplainDataFrameAnalytics) Description(description string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination configuration, consisting of index and optionally +// The destination configuration, consisting of index and optionally // results_field (ml by default). // API name: dest -func (r *ExplainDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.DataframeAnalyticsDestinationCaster() return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. 
// API name: max_num_threads func (r *ExplainDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to // create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *ExplainDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit return r } -// Source The configuration of how to source the analysis data. It requires an +// The configuration of how to source the analysis data. It requires an // index. Optionally, query and _source may be specified. // API name: source -func (r *ExplainDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.DataframeAnalyticsSourceCaster() return r } diff --git a/typedapi/ml/explaindataframeanalytics/request.go b/typedapi/ml/explaindataframeanalytics/request.go index 274eed21fa..182f1f3ba9 100644 --- a/typedapi/ml/explaindataframeanalytics/request.go +++ b/typedapi/ml/explaindataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explaindataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L120 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/typedapi/ml/explaindataframeanalytics/response.go b/typedapi/ml/explaindataframeanalytics/response.go index e1ec2a87bf..29ec25836e 100644 --- a/typedapi/ml/explaindataframeanalytics/response.go +++ b/typedapi/ml/explaindataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package explaindataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 type Response struct { // FieldSelection An array of objects that explain selection for each field, sorted by the diff --git a/typedapi/ml/flushjob/flush_job.go b/typedapi/ml/flushjob/flush_job.go index c10b1d825f..a53cf67517 100644 --- a/typedapi/ml/flushjob/flush_job.go +++ b/typedapi/ml/flushjob/flush_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Force buffered data to be processed. // The flush jobs API is only applicable when sending data for analysis using @@ -99,7 +99,7 @@ func NewFlushJobFunc(tp elastictransport.Interface) NewFlushJob { // persists the model state to disk and the job must be opened again before // analyzing further data. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job func New(tp elastictransport.Interface) *FlushJob { r := &FlushJob{ transport: tp, @@ -107,8 +107,6 @@ func New(tp elastictransport.Interface) *FlushJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -377,42 +375,67 @@ func (r *FlushJob) Pretty(pretty bool) *FlushJob { return r } -// AdvanceTime Refer to the description for the `advance_time` query parameter. +// Refer to the description for the `advance_time` query parameter. // API name: advance_time -func (r *FlushJob) AdvanceTime(datetime types.DateTime) *FlushJob { - r.req.AdvanceTime = datetime +func (r *FlushJob) AdvanceTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdvanceTime = *datetime.DateTimeCaster() return r } -// CalcInterim Refer to the description for the `calc_interim` query parameter. +// Refer to the description for the `calc_interim` query parameter. // API name: calc_interim func (r *FlushJob) CalcInterim(calcinterim bool) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CalcInterim = &calcinterim return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. 
// API name: end -func (r *FlushJob) End(datetime types.DateTime) *FlushJob { - r.req.End = datetime +func (r *FlushJob) End(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// SkipTime Refer to the description for the `skip_time` query parameter. +// Refer to the description for the `skip_time` query parameter. // API name: skip_time -func (r *FlushJob) SkipTime(datetime types.DateTime) *FlushJob { - r.req.SkipTime = datetime +func (r *FlushJob) SkipTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SkipTime = *datetime.DateTimeCaster() return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *FlushJob) Start(datetime types.DateTime) *FlushJob { - r.req.Start = datetime +func (r *FlushJob) Start(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/flushjob/request.go b/typedapi/ml/flushjob/request.go index 3beb279531..ce6ac5d2a2 100644 --- a/typedapi/ml/flushjob/request.go +++ b/typedapi/ml/flushjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package flushjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L107 type Request struct { // AdvanceTime Refer to the description for the `advance_time` query parameter. diff --git a/typedapi/ml/flushjob/response.go b/typedapi/ml/flushjob/response.go index 3f5fcc16d2..e84774fad0 100644 --- a/typedapi/ml/flushjob/response.go +++ b/typedapi/ml/flushjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package flushjob // Response holds the response body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 type Response struct { Flushed bool `json:"flushed"` // LastFinalizedBucketEnd Provides the timestamp (in milliseconds since the epoch) of the end of diff --git a/typedapi/ml/forecast/forecast.go b/typedapi/ml/forecast/forecast.go index 8a970d2357..e3f813e7cd 100644 --- a/typedapi/ml/forecast/forecast.go +++ b/typedapi/ml/forecast/forecast.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Predict future behavior of a time series. // @@ -93,7 +93,7 @@ func NewForecastFunc(tp elastictransport.Interface) NewForecast { // `over_field_name` in its configuration. Forcasts predict future behavior // based on historical data. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast func New(tp elastictransport.Interface) *Forecast { r := &Forecast{ transport: tp, @@ -101,8 +101,6 @@ func New(tp elastictransport.Interface) *Forecast { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -372,25 +370,39 @@ func (r *Forecast) Pretty(pretty bool) *Forecast { return r } -// Duration Refer to the description for the `duration` query parameter. +// Refer to the description for the `duration` query parameter. // API name: duration -func (r *Forecast) Duration(duration types.Duration) *Forecast { - r.req.Duration = duration +func (r *Forecast) Duration(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Duration = *duration.DurationCaster() return r } -// ExpiresIn Refer to the description for the `expires_in` query parameter. +// Refer to the description for the `expires_in` query parameter. // API name: expires_in -func (r *Forecast) ExpiresIn(duration types.Duration) *Forecast { - r.req.ExpiresIn = duration +func (r *Forecast) ExpiresIn(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExpiresIn = *duration.DurationCaster() return r } -// MaxModelMemory Refer to the description for the `max_model_memory` query parameter. +// Refer to the description for the `max_model_memory` query parameter. 
// API name: max_model_memory func (r *Forecast) MaxModelMemory(maxmodelmemory string) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxModelMemory = &maxmodelmemory diff --git a/typedapi/ml/forecast/request.go b/typedapi/ml/forecast/request.go index 86376e690e..5f8275c8b8 100644 --- a/typedapi/ml/forecast/request.go +++ b/typedapi/ml/forecast/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package forecast @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/forecast/MlForecastJobRequest.ts#L24-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/forecast/MlForecastJobRequest.ts#L24-L95 type Request struct { // Duration Refer to the description for the `duration` query parameter. diff --git a/typedapi/ml/forecast/response.go b/typedapi/ml/forecast/response.go index 4ecdb9296d..3c36b362ad 100644 --- a/typedapi/ml/forecast/response.go +++ b/typedapi/ml/forecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package forecast // Response holds the response body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` ForecastId string `json:"forecast_id"` diff --git a/typedapi/ml/getbuckets/get_buckets.go b/typedapi/ml/getbuckets/get_buckets.go index a0eb44acdf..88967c7ede 100644 --- a/typedapi/ml/getbuckets/get_buckets.go +++ b/typedapi/ml/getbuckets/get_buckets.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection job results for buckets. // The API presents a chronological view of the records, grouped by bucket. @@ -88,7 +88,7 @@ func NewGetBucketsFunc(tp elastictransport.Interface) NewGetBuckets { // Get anomaly detection job results for buckets. // The API presents a chronological view of the records, grouped by bucket. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets func New(tp elastictransport.Interface) *GetBuckets { r := &GetBuckets{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *GetBuckets { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -417,67 +415,105 @@ func (r *GetBuckets) Pretty(pretty bool) *GetBuckets { return r } -// AnomalyScore Refer to the description for the `anomaly_score` query parameter. +// Refer to the description for the `anomaly_score` query parameter. // API name: anomaly_score func (r *GetBuckets) AnomalyScore(anomalyscore types.Float64) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AnomalyScore = &anomalyscore return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetBuckets) Desc(desc bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetBuckets) End(datetime types.DateTime) *GetBuckets { - r.req.End = datetime +func (r *GetBuckets) End(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. 
// API name: exclude_interim func (r *GetBuckets) ExcludeInterim(excludeinterim bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } -// Expand Refer to the description for the `expand` query parameter. +// Refer to the description for the `expand` query parameter. // API name: expand func (r *GetBuckets) Expand(expand bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Expand = &expand return r } // API name: page -func (r *GetBuckets) Page(page *types.Page) *GetBuckets { +func (r *GetBuckets) Page(page types.PageVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// Sort Refer to the desription for the `sort` query parameter. +// Refer to the desription for the `sort` query parameter. // API name: sort func (r *GetBuckets) Sort(field string) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *GetBuckets) Start(datetime types.DateTime) *GetBuckets { - r.req.Start = datetime +func (r *GetBuckets) Start(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getbuckets/request.go b/typedapi/ml/getbuckets/request.go index 5d606e9d2b..06986cf375 100644 --- a/typedapi/ml/getbuckets/request.go +++ b/typedapi/ml/getbuckets/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getbuckets @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L145 type Request struct { // AnomalyScore Refer to the description for the `anomaly_score` query parameter. diff --git a/typedapi/ml/getbuckets/response.go b/typedapi/ml/getbuckets/response.go index ca5577f287..84ae7c702b 100644 --- a/typedapi/ml/getbuckets/response.go +++ b/typedapi/ml/getbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 type Response struct { Buckets []types.BucketSummary `json:"buckets"` Count int64 `json:"count"` diff --git a/typedapi/ml/getcalendarevents/get_calendar_events.go b/typedapi/ml/getcalendarevents/get_calendar_events.go index 4a19144b38..2140d2d04b 100644 --- a/typedapi/ml/getcalendarevents/get_calendar_events.go +++ b/typedapi/ml/getcalendarevents/get_calendar_events.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get info about events in calendars. package getcalendarevents @@ -78,7 +78,7 @@ func NewGetCalendarEventsFunc(tp elastictransport.Interface) NewGetCalendarEvent // Get info about events in calendars. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events func New(tp elastictransport.Interface) *GetCalendarEvents { r := &GetCalendarEvents{ transport: tp, diff --git a/typedapi/ml/getcalendarevents/response.go b/typedapi/ml/getcalendarevents/response.go index 5d2ee202df..deb5531443 100644 --- a/typedapi/ml/getcalendarevents/response.go +++ b/typedapi/ml/getcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Events []types.CalendarEvent `json:"events"` diff --git a/typedapi/ml/getcalendars/get_calendars.go b/typedapi/ml/getcalendars/get_calendars.go index 019ef2561f..9607c6ae2b 100644 --- a/typedapi/ml/getcalendars/get_calendars.go +++ b/typedapi/ml/getcalendars/get_calendars.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get calendar configuration info. package getcalendars @@ -81,7 +81,7 @@ func NewGetCalendarsFunc(tp elastictransport.Interface) NewGetCalendars { // Get calendar configuration info. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars func New(tp elastictransport.Interface) *GetCalendars { r := &GetCalendars{ transport: tp, @@ -89,8 +89,6 @@ func New(tp elastictransport.Interface) *GetCalendars { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -385,11 +383,15 @@ func (r *GetCalendars) Pretty(pretty bool) *GetCalendars { return r } -// Page This object is supported only when you omit the calendar identifier. +// This object is supported only when you omit the calendar identifier. // API name: page -func (r *GetCalendars) Page(page *types.Page) *GetCalendars { +func (r *GetCalendars) Page(page types.PageVariant) *GetCalendars { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getcalendars/request.go b/typedapi/ml/getcalendars/request.go index 4360b25999..4f248b320c 100644 --- a/typedapi/ml/getcalendars/request.go +++ b/typedapi/ml/getcalendars/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcalendars @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L63 type Request struct { // Page This object is supported only when you omit the calendar identifier. diff --git a/typedapi/ml/getcalendars/response.go b/typedapi/ml/getcalendars/response.go index 0b6423fa5c..21fffc81fc 100644 --- a/typedapi/ml/getcalendars/response.go +++ b/typedapi/ml/getcalendars/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcalendars @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 type Response struct { Calendars []types.Calendar `json:"calendars"` Count int64 `json:"count"` diff --git a/typedapi/ml/getcategories/get_categories.go b/typedapi/ml/getcategories/get_categories.go index e50c00e02a..94e27cb577 100644 --- a/typedapi/ml/getcategories/get_categories.go +++ b/typedapi/ml/getcategories/get_categories.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection job results for categories. package getcategories @@ -86,7 +86,7 @@ func NewGetCategoriesFunc(tp elastictransport.Interface) NewGetCategories { // Get anomaly detection job results for categories. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories func New(tp elastictransport.Interface) *GetCategories { r := &GetCategories{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *GetCategories { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -426,12 +424,16 @@ func (r *GetCategories) Pretty(pretty bool) *GetCategories { return r } -// Page Configures pagination. +// Configures pagination. // This parameter has the `from` and `size` properties. // API name: page -func (r *GetCategories) Page(page *types.Page) *GetCategories { +func (r *GetCategories) Page(page types.PageVariant) *GetCategories { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getcategories/request.go b/typedapi/ml/getcategories/request.go index 9067b097eb..a84421a219 100644 --- a/typedapi/ml/getcategories/request.go +++ b/typedapi/ml/getcategories/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcategories @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L82 type Request struct { // Page Configures pagination. diff --git a/typedapi/ml/getcategories/response.go b/typedapi/ml/getcategories/response.go index f54938f5ae..9b7752e0ad 100644 --- a/typedapi/ml/getcategories/response.go +++ b/typedapi/ml/getcategories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getcategories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 type Response struct { Categories []types.Category `json:"categories"` Count int64 `json:"count"` diff --git a/typedapi/ml/getdatafeeds/get_datafeeds.go b/typedapi/ml/getdatafeeds/get_datafeeds.go index c34fe40b16..a3d9ec7c4f 100644 --- a/typedapi/ml/getdatafeeds/get_datafeeds.go +++ b/typedapi/ml/getdatafeeds/get_datafeeds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get datafeeds configuration info. // You can get information for multiple datafeeds in a single API request by @@ -86,7 +86,7 @@ func NewGetDatafeedsFunc(tp elastictransport.Interface) NewGetDatafeeds { // ``, or by omitting the ``. // This API returns a maximum of 10,000 datafeeds. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds func New(tp elastictransport.Interface) *GetDatafeeds { r := &GetDatafeeds{ transport: tp, diff --git a/typedapi/ml/getdatafeeds/response.go b/typedapi/ml/getdatafeeds/response.go index e81c8213e7..c6844d3595 100644 --- a/typedapi/ml/getdatafeeds/response.go +++ b/typedapi/ml/getdatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Datafeeds []types.MLDatafeed `json:"datafeeds"` diff --git a/typedapi/ml/getdatafeedstats/get_datafeed_stats.go b/typedapi/ml/getdatafeedstats/get_datafeed_stats.go index 27c7c96f8d..b568243d6c 100644 --- a/typedapi/ml/getdatafeedstats/get_datafeed_stats.go +++ b/typedapi/ml/getdatafeedstats/get_datafeed_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get datafeeds usage info. 
// You can get statistics for multiple datafeeds in a single API request by @@ -88,7 +88,7 @@ func NewGetDatafeedStatsFunc(tp elastictransport.Interface) NewGetDatafeedStats // only information you receive is the `datafeed_id` and the `state`. // This API returns a maximum of 10,000 datafeeds. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats func New(tp elastictransport.Interface) *GetDatafeedStats { r := &GetDatafeedStats{ transport: tp, diff --git a/typedapi/ml/getdatafeedstats/response.go b/typedapi/ml/getdatafeedstats/response.go index 5369b4297f..691b1763b7 100644 --- a/typedapi/ml/getdatafeedstats/response.go +++ b/typedapi/ml/getdatafeedstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdatafeedstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeedstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Datafeeds []types.DatafeedStats `json:"datafeeds"` diff --git a/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go b/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go index d07ccecb2e..32942275f1 100644 --- a/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go +++ 
b/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data frame analytics job configuration info. // You can get information for multiple data frame analytics jobs in a single @@ -82,7 +82,7 @@ func NewGetDataFrameAnalyticsFunc(tp elastictransport.Interface) NewGetDataFrame // API request by using a comma-separated list of data frame analytics jobs or a // wildcard expression. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics func New(tp elastictransport.Interface) *GetDataFrameAnalytics { r := &GetDataFrameAnalytics{ transport: tp, diff --git a/typedapi/ml/getdataframeanalytics/response.go b/typedapi/ml/getdataframeanalytics/response.go index 85b8a6fd71..700710a348 100644 --- a/typedapi/ml/getdataframeanalytics/response.go +++ b/typedapi/ml/getdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 type Response struct { Count int `json:"count"` // DataFrameAnalytics An array of data frame analytics job resources, which are sorted by the id diff --git a/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go b/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go index 528f452d2c..3a15194682 100644 --- a/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go +++ b/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get data frame analytics jobs usage info. package getdataframeanalyticsstats @@ -76,7 +76,7 @@ func NewGetDataFrameAnalyticsStatsFunc(tp elastictransport.Interface) NewGetData // Get data frame analytics jobs usage info. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats func New(tp elastictransport.Interface) *GetDataFrameAnalyticsStats { r := &GetDataFrameAnalyticsStats{ transport: tp, diff --git a/typedapi/ml/getdataframeanalyticsstats/response.go b/typedapi/ml/getdataframeanalyticsstats/response.go index c2d9bcb998..2022708b9a 100644 --- a/typedapi/ml/getdataframeanalyticsstats/response.go +++ b/typedapi/ml/getdataframeanalyticsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getdataframeanalyticsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalyticsstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // DataFrameAnalytics An array of objects that contain usage information for data frame analytics diff --git a/typedapi/ml/getfilters/get_filters.go b/typedapi/ml/getfilters/get_filters.go index 14d3e6ad34..e91aba916e 100644 --- a/typedapi/ml/getfilters/get_filters.go +++ b/typedapi/ml/getfilters/get_filters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get filters. // You can get a single filter or all filters. @@ -78,7 +78,7 @@ func NewGetFiltersFunc(tp elastictransport.Interface) NewGetFilters { // Get filters. // You can get a single filter or all filters. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters func New(tp elastictransport.Interface) *GetFilters { r := &GetFilters{ transport: tp, diff --git a/typedapi/ml/getfilters/response.go b/typedapi/ml/getfilters/response.go index f7571d1f77..888a3187ad 100644 --- a/typedapi/ml/getfilters/response.go +++ b/typedapi/ml/getfilters/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getfilters @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfilters // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Filters []types.MLFilter `json:"filters"` diff --git a/typedapi/ml/getinfluencers/get_influencers.go b/typedapi/ml/getinfluencers/get_influencers.go index 40a23187bb..efa8490e79 100644 --- a/typedapi/ml/getinfluencers/get_influencers.go +++ 
b/typedapi/ml/getinfluencers/get_influencers.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection job results for influencers. // Influencers are the entities that have contributed to, or are to blame for, @@ -89,7 +89,7 @@ func NewGetInfluencersFunc(tp elastictransport.Interface) NewGetInfluencers { // the anomalies. Influencer results are available only if an // `influencer_field_name` is specified in the job configuration. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers func New(tp elastictransport.Interface) *GetInfluencers { r := &GetInfluencers{ transport: tp, @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *GetInfluencers { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -439,12 +437,16 @@ func (r *GetInfluencers) Pretty(pretty bool) *GetInfluencers { return r } -// Page Configures pagination. +// Configures pagination. // This parameter has the `from` and `size` properties. 
// API name: page -func (r *GetInfluencers) Page(page *types.Page) *GetInfluencers { +func (r *GetInfluencers) Page(page types.PageVariant) *GetInfluencers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getinfluencers/request.go b/typedapi/ml/getinfluencers/request.go index 9af085bf23..e6087e631a 100644 --- a/typedapi/ml/getinfluencers/request.go +++ b/typedapi/ml/getinfluencers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getinfluencers @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L105 type Request struct { // Page Configures pagination. diff --git a/typedapi/ml/getinfluencers/response.go b/typedapi/ml/getinfluencers/response.go index 2a28d7a95f..bca4f8b842 100644 --- a/typedapi/ml/getinfluencers/response.go +++ b/typedapi/ml/getinfluencers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getinfluencers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // Influencers Array of influencer objects diff --git a/typedapi/ml/getjobs/get_jobs.go b/typedapi/ml/getjobs/get_jobs.go index 4d8dc506fe..8a53f37b82 100644 --- a/typedapi/ml/getjobs/get_jobs.go +++ b/typedapi/ml/getjobs/get_jobs.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection jobs configuration info. // You can get information for multiple anomaly detection jobs in a single API @@ -84,7 +84,7 @@ func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs { // expression. You can get information for all anomaly detection jobs by using // `_all`, by specifying `*` as the ``, or by omitting the ``. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs func New(tp elastictransport.Interface) *GetJobs { r := &GetJobs{ transport: tp, diff --git a/typedapi/ml/getjobs/response.go b/typedapi/ml/getjobs/response.go index e6fcb71e7a..a0957bd0f8 100644 --- a/typedapi/ml/getjobs/response.go +++ b/typedapi/ml/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Jobs []types.Job `json:"jobs"` diff --git a/typedapi/ml/getjobstats/get_job_stats.go b/typedapi/ml/getjobstats/get_job_stats.go index 73cf6ca829..cfdb81f150 100644 --- a/typedapi/ml/getjobstats/get_job_stats.go +++ b/typedapi/ml/getjobstats/get_job_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection jobs usage info. 
package getjobstats @@ -76,7 +76,7 @@ func NewGetJobStatsFunc(tp elastictransport.Interface) NewGetJobStats { // Get anomaly detection jobs usage info. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats func New(tp elastictransport.Interface) *GetJobStats { r := &GetJobStats{ transport: tp, diff --git a/typedapi/ml/getjobstats/response.go b/typedapi/ml/getjobstats/response.go index dc94fb0bc0..d0337553fd 100644 --- a/typedapi/ml/getjobstats/response.go +++ b/typedapi/ml/getjobstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getjobstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Jobs []types.JobStats `json:"jobs"` diff --git a/typedapi/ml/getmemorystats/get_memory_stats.go b/typedapi/ml/getmemorystats/get_memory_stats.go index 1dd0dce4da..f93e5c3f2c 100644 --- a/typedapi/ml/getmemorystats/get_memory_stats.go +++ b/typedapi/ml/getmemorystats/get_memory_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get machine learning memory usage info. // Get information about how machine learning jobs and trained models are using @@ -82,7 +82,7 @@ func NewGetMemoryStatsFunc(tp elastictransport.Interface) NewGetMemoryStats { // memory, // on each node, both within the JVM heap, and natively, outside of the JVM. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats func New(tp elastictransport.Interface) *GetMemoryStats { r := &GetMemoryStats{ transport: tp, @@ -316,16 +316,6 @@ func (r *GetMemoryStats) NodeId(nodeid string) *GetMemoryStats { return r } -// Human Specify this query parameter to include the fields with units in the -// response. Otherwise only -// the `_in_bytes` sizes are returned in the response. -// API name: human -func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout // expires, the request fails and returns an error. @@ -368,6 +358,19 @@ func (r *GetMemoryStats) FilterPath(filterpaths ...string) *GetMemoryStats { return r } +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed
+// only by machines.
+// API name: human +func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use // this option for debugging only. // API name: pretty diff --git a/typedapi/ml/getmemorystats/response.go b/typedapi/ml/getmemorystats/response.go index ebdb889081..bd9d198bd5 100644 --- a/typedapi/ml/getmemorystats/response.go +++ b/typedapi/ml/getmemorystats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getmemorystats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmemorystats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/ml/getmodelsnapshots/get_model_snapshots.go b/typedapi/ml/getmodelsnapshots/get_model_snapshots.go index 5fa85a1806..f7715cc852 100644 --- a/typedapi/ml/getmodelsnapshots/get_model_snapshots.go +++ b/typedapi/ml/getmodelsnapshots/get_model_snapshots.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get model snapshots info. package getmodelsnapshots @@ -86,7 +86,7 @@ func NewGetModelSnapshotsFunc(tp elastictransport.Interface) NewGetModelSnapshot // Get model snapshots info. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots func New(tp elastictransport.Interface) *GetModelSnapshots { r := &GetModelSnapshots{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *GetModelSnapshots { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -414,42 +412,66 @@ func (r *GetModelSnapshots) Pretty(pretty bool) *GetModelSnapshots { return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetModelSnapshots) Desc(desc bool) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. 
// API name: end -func (r *GetModelSnapshots) End(datetime types.DateTime) *GetModelSnapshots { - r.req.End = datetime +func (r *GetModelSnapshots) End(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } // API name: page -func (r *GetModelSnapshots) Page(page *types.Page) *GetModelSnapshots { +func (r *GetModelSnapshots) Page(page types.PageVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// Sort Refer to the description for the `sort` query parameter. +// Refer to the description for the `sort` query parameter. // API name: sort func (r *GetModelSnapshots) Sort(field string) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *GetModelSnapshots) Start(datetime types.DateTime) *GetModelSnapshots { - r.req.Start = datetime +func (r *GetModelSnapshots) Start(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getmodelsnapshots/request.go b/typedapi/ml/getmodelsnapshots/request.go index 3b47a49b7c..95dc65a6a8 100644 --- a/typedapi/ml/getmodelsnapshots/request.go +++ b/typedapi/ml/getmodelsnapshots/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getmodelsnapshots @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L108 type Request struct { // Desc Refer to the description for the `desc` query parameter. diff --git a/typedapi/ml/getmodelsnapshots/response.go b/typedapi/ml/getmodelsnapshots/response.go index 0dcc472c61..3ae69a8a18 100644 --- a/typedapi/ml/getmodelsnapshots/response.go +++ b/typedapi/ml/getmodelsnapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getmodelsnapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` ModelSnapshots []types.ModelSnapshot `json:"model_snapshots"` diff --git a/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go b/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go index cc208d8dba..b141ef9cb8 100644 --- a/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go +++ b/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly detection job model snapshot upgrade usage info. package getmodelsnapshotupgradestats @@ -83,7 +83,7 @@ func NewGetModelSnapshotUpgradeStatsFunc(tp elastictransport.Interface) NewGetMo // Get anomaly detection job model snapshot upgrade usage info. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats func New(tp elastictransport.Interface) *GetModelSnapshotUpgradeStats { r := &GetModelSnapshotUpgradeStats{ transport: tp, diff --git a/typedapi/ml/getmodelsnapshotupgradestats/response.go b/typedapi/ml/getmodelsnapshotupgradestats/response.go index 5a95cd3947..3b9dea6090 100644 --- a/typedapi/ml/getmodelsnapshotupgradestats/response.go +++ b/typedapi/ml/getmodelsnapshotupgradestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getmodelsnapshotupgradestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshotupgradestats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` ModelSnapshotUpgrades []types.ModelSnapshotUpgrade `json:"model_snapshot_upgrades"` diff --git a/typedapi/ml/getoverallbuckets/get_overall_buckets.go b/typedapi/ml/getoverallbuckets/get_overall_buckets.go index c0a405c40b..5776672019 100644 --- a/typedapi/ml/getoverallbuckets/get_overall_buckets.go +++ b/typedapi/ml/getoverallbuckets/get_overall_buckets.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get overall bucket results. // @@ -119,7 +119,7 @@ func NewGetOverallBucketsFunc(tp elastictransport.Interface) NewGetOverallBucket // `overall_score` of the overall buckets that have a span equal to the // jobs' largest bucket span. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets func New(tp elastictransport.Interface) *GetOverallBuckets { r := &GetOverallBuckets{ transport: tp, @@ -127,8 +127,6 @@ func New(tp elastictransport.Interface) *GetOverallBuckets { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -404,57 +402,92 @@ func (r *GetOverallBuckets) Pretty(pretty bool) *GetOverallBuckets { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *GetOverallBuckets) AllowNoMatch(allownomatch bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// BucketSpan Refer to the description for the `bucket_span` query parameter. +// Refer to the description for the `bucket_span` query parameter. 
// API name: bucket_span -func (r *GetOverallBuckets) BucketSpan(duration types.Duration) *GetOverallBuckets { - r.req.BucketSpan = duration +func (r *GetOverallBuckets) BucketSpan(duration types.DurationVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BucketSpan = *duration.DurationCaster() return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetOverallBuckets) End(datetime types.DateTime) *GetOverallBuckets { - r.req.End = datetime +func (r *GetOverallBuckets) End(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. // API name: exclude_interim func (r *GetOverallBuckets) ExcludeInterim(excludeinterim bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } -// OverallScore Refer to the description for the `overall_score` query parameter. +// Refer to the description for the `overall_score` query parameter. // API name: overall_score func (r *GetOverallBuckets) OverallScore(overallscore string) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.OverallScore = overallscore return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetOverallBuckets) Start(datetime types.DateTime) *GetOverallBuckets { - r.req.Start = datetime +func (r *GetOverallBuckets) Start(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } -// TopN Refer to the description for the `top_n` query parameter. +// Refer to the description for the `top_n` query parameter. // API name: top_n func (r *GetOverallBuckets) TopN(topn int) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TopN = &topn return r diff --git a/typedapi/ml/getoverallbuckets/request.go b/typedapi/ml/getoverallbuckets/request.go index d876ab0704..55a349e43c 100644 --- a/typedapi/ml/getoverallbuckets/request.go +++ b/typedapi/ml/getoverallbuckets/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getoverallbuckets @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L153 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. 
diff --git a/typedapi/ml/getoverallbuckets/response.go b/typedapi/ml/getoverallbuckets/response.go index 826df26da7..b0ad2b977b 100644 --- a/typedapi/ml/getoverallbuckets/response.go +++ b/typedapi/ml/getoverallbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getoverallbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // OverallBuckets Array of overall bucket objects diff --git a/typedapi/ml/getrecords/get_records.go b/typedapi/ml/getrecords/get_records.go index e21ffb0389..5818a377e7 100644 --- a/typedapi/ml/getrecords/get_records.go +++ b/typedapi/ml/getrecords/get_records.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get anomaly records for an anomaly detection job. // Records contain the detailed analytical results. They describe the anomalous @@ -103,7 +103,7 @@ func NewGetRecordsFunc(tp elastictransport.Interface) NewGetRecords { // bucket, which relates to the number of time series being modeled and the // number of detectors. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records func New(tp elastictransport.Interface) *GetRecords { r := &GetRecords{ transport: tp, @@ -111,8 +111,6 @@ func New(tp elastictransport.Interface) *GetRecords { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -399,59 +397,92 @@ func (r *GetRecords) Pretty(pretty bool) *GetRecords { return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetRecords) Desc(desc bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetRecords) End(datetime types.DateTime) *GetRecords { - r.req.End = datetime +func (r *GetRecords) End(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. 
// API name: exclude_interim func (r *GetRecords) ExcludeInterim(excludeinterim bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } // API name: page -func (r *GetRecords) Page(page *types.Page) *GetRecords { +func (r *GetRecords) Page(page types.PageVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// RecordScore Refer to the description for the `record_score` query parameter. +// Refer to the description for the `record_score` query parameter. // API name: record_score func (r *GetRecords) RecordScore(recordscore types.Float64) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RecordScore = &recordscore return r } -// Sort Refer to the description for the `sort` query parameter. +// Refer to the description for the `sort` query parameter. // API name: sort func (r *GetRecords) Sort(field string) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetRecords) Start(datetime types.DateTime) *GetRecords { - r.req.Start = datetime +func (r *GetRecords) Start(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getrecords/request.go b/typedapi/ml/getrecords/request.go index 7919d53236..876ec120dc 100644 --- a/typedapi/ml/getrecords/request.go +++ b/typedapi/ml/getrecords/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrecords @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L135 type Request struct { // Desc Refer to the description for the `desc` query parameter. diff --git a/typedapi/ml/getrecords/response.go b/typedapi/ml/getrecords/response.go index c02b154016..1b748fb806 100644 --- a/typedapi/ml/getrecords/response.go +++ b/typedapi/ml/getrecords/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrecords @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Records []types.Anomaly `json:"records"` diff --git a/typedapi/ml/gettrainedmodels/get_trained_models.go b/typedapi/ml/gettrainedmodels/get_trained_models.go index 17d182bedb..f13f5808c1 100644 --- a/typedapi/ml/gettrainedmodels/get_trained_models.go +++ b/typedapi/ml/gettrainedmodels/get_trained_models.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get trained model configuration info. package gettrainedmodels @@ -77,7 +77,7 @@ func NewGetTrainedModelsFunc(tp elastictransport.Interface) NewGetTrainedModels // Get trained model configuration info. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models func New(tp elastictransport.Interface) *GetTrainedModels { r := &GetTrainedModels{ transport: tp, diff --git a/typedapi/ml/gettrainedmodels/response.go b/typedapi/ml/gettrainedmodels/response.go index 4215ae6b45..cbe6684e80 100644 --- a/typedapi/ml/gettrainedmodels/response.go +++ b/typedapi/ml/gettrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 type Response struct { Count int `json:"count"` // TrainedModelConfigs An array of trained model resources, which are sorted by the model_id value diff --git a/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go b/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go index 241bffd259..ced707a30c 100644 --- a/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go +++ b/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get trained models usage info. // You can get usage information for multiple trained @@ -82,7 +82,7 @@ func NewGetTrainedModelsStatsFunc(tp elastictransport.Interface) NewGetTrainedMo // models in a single API request by using a comma-separated list of model IDs // or a wildcard expression. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats func New(tp elastictransport.Interface) *GetTrainedModelsStats { r := &GetTrainedModelsStats{ transport: tp, diff --git a/typedapi/ml/gettrainedmodelsstats/response.go b/typedapi/ml/gettrainedmodelsstats/response.go index edd7b84fd0..7fa6160fd0 100644 --- a/typedapi/ml/gettrainedmodelsstats/response.go +++ b/typedapi/ml/gettrainedmodelsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettrainedmodelsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodelsstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 type Response struct { // Count The total number of trained model statistics that matched the requested ID diff --git a/typedapi/ml/infertrainedmodel/infer_trained_model.go b/typedapi/ml/infertrainedmodel/infer_trained_model.go index 3b39c5db7f..46c9c6cd87 100644 --- a/typedapi/ml/infertrainedmodel/infer_trained_model.go +++ b/typedapi/ml/infertrainedmodel/infer_trained_model.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Evaluate a trained model. package infertrainedmodel @@ -83,7 +83,7 @@ func NewInferTrainedModelFunc(tp elastictransport.Interface) NewInferTrainedMode // Evaluate a trained model. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model func New(tp elastictransport.Interface) *InferTrainedModel { r := &InferTrainedModel{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *InferTrainedModel { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -170,23 +168,6 @@ func (r *InferTrainedModel) HttpRequest(ctx context.Context) (*http.Request, err path.WriteString("/") path.WriteString("_infer") - method = http.MethodPost - case r.paramSet == modelidMask: - path.WriteString("/") - path.WriteString("_ml") - path.WriteString("/") - path.WriteString("trained_models") - path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "modelid", r.modelid) - } - path.WriteString(r.modelid) - path.WriteString("/") - path.WriteString("deployment") - path.WriteString("/") - path.WriteString("_infer") - method = http.MethodPost } @@ -386,23 +367,32 @@ func (r *InferTrainedModel) Pretty(pretty bool) *InferTrainedModel { return r } -// Docs An array of objects to pass to the model for inference. The objects should +// An array of objects to pass to the model for inference. The objects should // contain a fields matching your // configured trained model input. Typically, for NLP models, the field name is // `text_field`. // Currently, for NLP models, only a single value is allowed. 
// API name: docs -func (r *InferTrainedModel) Docs(docs ...map[string]json.RawMessage) *InferTrainedModel { +func (r *InferTrainedModel) Docs(docs []map[string]json.RawMessage) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Docs = docs return r } -// InferenceConfig The inference configuration updates to apply on the API call +// The inference configuration updates to apply on the API call // API name: inference_config -func (r *InferTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigUpdateContainer) *InferTrainedModel { +func (r *InferTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigUpdateContainerVariant) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.InferenceConfig = inferenceconfig + r.req.InferenceConfig = inferenceconfig.InferenceConfigUpdateContainerCaster() return r } diff --git a/typedapi/ml/infertrainedmodel/request.go b/typedapi/ml/infertrainedmodel/request.go index eed180d5f4..f18b920bea 100644 --- a/typedapi/ml/infertrainedmodel/request.go +++ b/typedapi/ml/infertrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package infertrainedmodel @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L67 type Request struct { // Docs An array of objects to pass to the model for inference. The objects should diff --git a/typedapi/ml/infertrainedmodel/response.go b/typedapi/ml/infertrainedmodel/response.go index 835e368309..4856b5c2fe 100644 --- a/typedapi/ml/infertrainedmodel/response.go +++ b/typedapi/ml/infertrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package infertrainedmodel @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 type Response struct { InferenceResults []types.InferenceResponseResult `json:"inference_results"` } diff --git a/typedapi/ml/info/info.go b/typedapi/ml/info/info.go index 73b1f28236..96c4d7fc49 100644 --- a/typedapi/ml/info/info.go +++ b/typedapi/ml/info/info.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Return ML defaults and limits. -// Returns defaults and limits used by machine learning. +// Get machine learning information. +// Get defaults and limits used by machine learning. // This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be @@ -75,8 +75,8 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Return ML defaults and limits. -// Returns defaults and limits used by machine learning. +// Get machine learning information. +// Get defaults and limits used by machine learning. 
// This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be @@ -84,7 +84,7 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { // the maximum size of machine learning jobs that could run in the current // cluster configuration. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, diff --git a/typedapi/ml/info/response.go b/typedapi/ml/info/response.go index 766a6bf497..2b376b7fec 100644 --- a/typedapi/ml/info/response.go +++ b/typedapi/ml/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/MlInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/MlInfoResponse.ts#L22-L29 type Response struct { Defaults types.Defaults `json:"defaults"` Limits types.Limits `json:"limits"` diff --git a/typedapi/ml/openjob/open_job.go b/typedapi/ml/openjob/open_job.go index 440063e37b..ff38c38e7e 100644 --- a/typedapi/ml/openjob/open_job.go +++ b/typedapi/ml/openjob/open_job.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Open anomaly detection jobs. +// // An anomaly detection job must be opened to be ready to receive and analyze // data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. @@ -88,6 +89,7 @@ func NewOpenJobFunc(tp elastictransport.Interface) NewOpenJob { } // Open anomaly detection jobs. +// // An anomaly detection job must be opened to be ready to receive and analyze // data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. @@ -95,7 +97,7 @@ func NewOpenJobFunc(tp elastictransport.Interface) NewOpenJob { // loaded. The job is ready to resume its analysis from where it left off, once // new data is received. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job func New(tp elastictransport.Interface) *OpenJob { r := &OpenJob{ transport: tp, @@ -103,8 +105,6 @@ func New(tp elastictransport.Interface) *OpenJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -373,10 +373,15 @@ func (r *OpenJob) Pretty(pretty bool) *OpenJob { return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *OpenJob) Timeout(duration types.Duration) *OpenJob { - r.req.Timeout = duration +func (r *OpenJob) Timeout(duration types.DurationVariant) *OpenJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/openjob/request.go b/typedapi/ml/openjob/request.go index 4278b8658f..97f69f8ec5 100644 --- a/typedapi/ml/openjob/request.go +++ b/typedapi/ml/openjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package openjob @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/open_job/MlOpenJobRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/open_job/MlOpenJobRequest.ts#L24-L67 type Request struct { // Timeout Refer to the description for the `timeout` query parameter. diff --git a/typedapi/ml/openjob/response.go b/typedapi/ml/openjob/response.go index 0f7957e92b..f504553a3b 100644 --- a/typedapi/ml/openjob/response.go +++ b/typedapi/ml/openjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package openjob // Response holds the response body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 type Response struct { // Node The ID of the node that the job was started on. In serverless this will be diff --git a/typedapi/ml/postcalendarevents/post_calendar_events.go b/typedapi/ml/postcalendarevents/post_calendar_events.go index 2b6ca33dcd..dee28a7250 100644 --- a/typedapi/ml/postcalendarevents/post_calendar_events.go +++ b/typedapi/ml/postcalendarevents/post_calendar_events.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Add scheduled events to the calendar. package postcalendarevents @@ -83,7 +83,7 @@ func NewPostCalendarEventsFunc(tp elastictransport.Interface) NewPostCalendarEve // Add scheduled events to the calendar. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events func New(tp elastictransport.Interface) *PostCalendarEvents { r := &PostCalendarEvents{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *PostCalendarEvents { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,12 +359,19 @@ func (r *PostCalendarEvents) Pretty(pretty bool) *PostCalendarEvents { return r } -// Events A list of one of more scheduled events. The event’s start and end times can +// A list of one of more scheduled events. The event’s start and end times can // be specified as integer milliseconds since the epoch or as a string in ISO // 8601 format. // API name: events -func (r *PostCalendarEvents) Events(events ...types.CalendarEvent) *PostCalendarEvents { - r.req.Events = events +func (r *PostCalendarEvents) Events(events ...types.CalendarEventVariant) *PostCalendarEvents { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range events { + r.req.Events = append(r.req.Events, *v.CalendarEventCaster()) + + } return r } diff --git a/typedapi/ml/postcalendarevents/request.go b/typedapi/ml/postcalendarevents/request.go index 4c5ee03ac0..0b73996d5a 100644 --- a/typedapi/ml/postcalendarevents/request.go +++ b/typedapi/ml/postcalendarevents/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package postcalendarevents @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L48 type Request struct { // Events A list of one of more scheduled events. The event’s start and end times can diff --git a/typedapi/ml/postcalendarevents/response.go b/typedapi/ml/postcalendarevents/response.go index af9d64eec4..4cfb5ea63f 100644 --- a/typedapi/ml/postcalendarevents/response.go +++ b/typedapi/ml/postcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package postcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 type Response struct { Events []types.CalendarEvent `json:"events"` } diff --git a/typedapi/ml/postdata/post_data.go b/typedapi/ml/postdata/post_data.go index 69f99778ab..4b81a3b9f4 100644 --- a/typedapi/ml/postdata/post_data.go +++ b/typedapi/ml/postdata/post_data.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Send data to an anomaly detection job for analysis. // @@ -93,7 +93,7 @@ func NewPostDataFunc(tp elastictransport.Interface) NewPostData { // It is not currently possible to post data to multiple jobs using wildcards or // a comma-separated list. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data func New(tp elastictransport.Interface) *PostData { r := &PostData{ transport: tp, @@ -101,8 +101,6 @@ func New(tp elastictransport.Interface) *PostData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/ml/postdata/request.go b/typedapi/ml/postdata/request.go index f4eb7c3b36..75211269c0 100644 --- a/typedapi/ml/postdata/request.go +++ b/typedapi/ml/postdata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package postdata @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package postdata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L76 type Request = []json.RawMessage // NewRequest returns a Request diff --git a/typedapi/ml/postdata/response.go b/typedapi/ml/postdata/response.go index 9e9b989c61..4ddcac787c 100644 --- a/typedapi/ml/postdata/response.go +++ b/typedapi/ml/postdata/response.go @@ -16,29 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package postdata // Response holds the response body struct for the package postdata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/post_data/MlPostJobDataResponse.ts#L23-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/post_data/MlPostJobDataResponse.ts#L24-L45 type Response struct { - BucketCount int64 `json:"bucket_count"` - EarliestRecordTimestamp int64 `json:"earliest_record_timestamp"` - EmptyBucketCount int64 `json:"empty_bucket_count"` - InputBytes int64 `json:"input_bytes"` - InputFieldCount int64 `json:"input_field_count"` - InputRecordCount int64 `json:"input_record_count"` - InvalidDateCount int64 `json:"invalid_date_count"` - JobId string `json:"job_id"` - LastDataTime int `json:"last_data_time"` - LatestRecordTimestamp int64 `json:"latest_record_timestamp"` - MissingFieldCount int64 `json:"missing_field_count"` - OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` - ProcessedFieldCount int64 `json:"processed_field_count"` - ProcessedRecordCount int64 `json:"processed_record_count"` - SparseBucketCount int64 `json:"sparse_bucket_count"` + BucketCount int64 `json:"bucket_count"` + EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` + EmptyBucketCount int64 `json:"empty_bucket_count"` + InputBytes int64 `json:"input_bytes"` + InputFieldCount int64 `json:"input_field_count"` + InputRecordCount int64 `json:"input_record_count"` + InvalidDateCount int64 `json:"invalid_date_count"` + JobId string `json:"job_id"` + LastDataTime *int64 `json:"last_data_time,omitempty"` + LatestEmptyBucketTimestamp *int64 `json:"latest_empty_bucket_timestamp,omitempty"` + 
LatestRecordTimestamp *int64 `json:"latest_record_timestamp,omitempty"` + LatestSparseBucketTimestamp *int64 `json:"latest_sparse_bucket_timestamp,omitempty"` + LogTime *int64 `json:"log_time,omitempty"` + MissingFieldCount int64 `json:"missing_field_count"` + OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` + ProcessedFieldCount int64 `json:"processed_field_count"` + ProcessedRecordCount int64 `json:"processed_record_count"` + SparseBucketCount int64 `json:"sparse_bucket_count"` } // NewResponse returns a Response diff --git a/typedapi/ml/previewdatafeed/preview_datafeed.go b/typedapi/ml/previewdatafeed/preview_datafeed.go index 3838478722..ef826d2995 100644 --- a/typedapi/ml/previewdatafeed/preview_datafeed.go +++ b/typedapi/ml/previewdatafeed/preview_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Preview a datafeed. // This API returns the first "page" of search results from a datafeed. @@ -107,7 +107,7 @@ func NewPreviewDatafeedFunc(tp elastictransport.Interface) NewPreviewDatafeed { // datafeed, use the appropriate credentials. // You can also use secondary authorization headers to supply the credentials. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed func New(tp elastictransport.Interface) *PreviewDatafeed { r := &PreviewDatafeed{ transport: tp, @@ -115,8 +115,6 @@ func New(tp elastictransport.Interface) *PreviewDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -416,16 +414,20 @@ func (r *PreviewDatafeed) Pretty(pretty bool) *PreviewDatafeed { return r } -// DatafeedConfig The datafeed definition to preview. +// The datafeed definition to preview. // API name: datafeed_config -func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PreviewDatafeed { +func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DatafeedConfig = datafeedconfig + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() return r } -// JobConfig The configuration details for the anomaly detection job that is associated +// The configuration details for the anomaly detection job that is associated // with the datafeed. If the // `datafeed_config` object does not include a `job_id` that references an // existing anomaly detection job, you must @@ -434,9 +436,13 @@ func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig *types.DatafeedConfig) * // used. You cannot specify a `job_config` object unless you also supply a // `datafeed_config` object. 
// API name: job_config -func (r *PreviewDatafeed) JobConfig(jobconfig *types.JobConfig) *PreviewDatafeed { +func (r *PreviewDatafeed) JobConfig(jobconfig types.JobConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.JobConfig = jobconfig + r.req.JobConfig = jobconfig.JobConfigCaster() return r } diff --git a/typedapi/ml/previewdatafeed/request.go b/typedapi/ml/previewdatafeed/request.go index 80adf49ed1..12cc4dec69 100644 --- a/typedapi/ml/previewdatafeed/request.go +++ b/typedapi/ml/previewdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L81 type Request struct { // DatafeedConfig The datafeed definition to preview. diff --git a/typedapi/ml/previewdatafeed/response.go b/typedapi/ml/previewdatafeed/response.go index b86d9a6286..a4b7a27019 100644 --- a/typedapi/ml/previewdatafeed/response.go +++ b/typedapi/ml/previewdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewdatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L23 type Response []json.RawMessage diff --git a/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go b/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go index 26001a5c3a..90bdf4438b 100644 --- a/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go +++ b/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Preview features used by data frame analytics. -// Previews the extracted features used by a data frame analytics config. +// Preview the extracted features used by a data frame analytics config. package previewdataframeanalytics import ( @@ -81,9 +81,9 @@ func NewPreviewDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPreviewD } // Preview features used by data frame analytics. -// Previews the extracted features used by a data frame analytics config. +// Preview the extracted features used by a data frame analytics config. 
// -// http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { r := &PreviewDataFrameAnalytics{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,13 +372,17 @@ func (r *PreviewDataFrameAnalytics) Pretty(pretty bool) *PreviewDataFrameAnalyti return r } -// Config A data frame analytics config as described in create data frame analytics +// A data frame analytics config as described in create data frame analytics // jobs. Note that `id` and `dest` don’t need to be provided in the context of // this API. // API name: config -func (r *PreviewDataFrameAnalytics) Config(config *types.DataframePreviewConfig) *PreviewDataFrameAnalytics { +func (r *PreviewDataFrameAnalytics) Config(config types.DataframePreviewConfigVariant) *PreviewDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Config = config + r.req.Config = config.DataframePreviewConfigCaster() return r } diff --git a/typedapi/ml/previewdataframeanalytics/request.go b/typedapi/ml/previewdataframeanalytics/request.go index db9266f439..e31b7eccb6 100644 --- a/typedapi/ml/previewdataframeanalytics/request.go +++ b/typedapi/ml/previewdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewdataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L60 type Request struct { // Config A data frame analytics config as described in create data frame analytics diff --git a/typedapi/ml/previewdataframeanalytics/response.go b/typedapi/ml/previewdataframeanalytics/response.go index 6d1deeb147..c33c6cf4be 100644 --- a/typedapi/ml/previewdataframeanalytics/response.go +++ b/typedapi/ml/previewdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewdataframeanalytics // Response holds the response body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 type Response struct { // FeatureValues An array of objects that contain feature name and value pairs. The features diff --git a/typedapi/ml/putcalendar/put_calendar.go b/typedapi/ml/putcalendar/put_calendar.go index 4a13c633b0..e708b7559b 100644 --- a/typedapi/ml/putcalendar/put_calendar.go +++ b/typedapi/ml/putcalendar/put_calendar.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a calendar. package putcalendar @@ -83,7 +83,7 @@ func NewPutCalendarFunc(tp elastictransport.Interface) NewPutCalendar { // Create a calendar. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar func New(tp elastictransport.Interface) *PutCalendar { r := &PutCalendar{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *PutCalendar { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,19 +357,30 @@ func (r *PutCalendar) Pretty(pretty bool) *PutCalendar { return r } -// Description A description of the calendar. +// A description of the calendar. // API name: description func (r *PutCalendar) Description(description string) *PutCalendar { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// JobIds An array of anomaly detection job identifiers. +// An array of anomaly detection job identifiers. // API name: job_ids func (r *PutCalendar) JobIds(jobids ...string) *PutCalendar { - r.req.JobIds = jobids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range jobids { + r.req.JobIds = append(r.req.JobIds, v) + + } return r } diff --git a/typedapi/ml/putcalendar/request.go b/typedapi/ml/putcalendar/request.go index 5f6bb5b3b9..4ed7e8a823 100644 --- a/typedapi/ml/putcalendar/request.go +++ b/typedapi/ml/putcalendar/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putcalendar @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L51 type Request struct { // Description A description of the calendar. diff --git a/typedapi/ml/putcalendar/response.go b/typedapi/ml/putcalendar/response.go index ed4299bf68..77f8b44cdd 100644 --- a/typedapi/ml/putcalendar/response.go +++ b/typedapi/ml/putcalendar/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putcalendar @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. 
diff --git a/typedapi/ml/putcalendarjob/put_calendar_job.go b/typedapi/ml/putcalendarjob/put_calendar_job.go index d74306699a..48c5b0d94c 100644 --- a/typedapi/ml/putcalendarjob/put_calendar_job.go +++ b/typedapi/ml/putcalendarjob/put_calendar_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Add anomaly detection job to calendar. package putcalendarjob @@ -83,7 +83,7 @@ func NewPutCalendarJobFunc(tp elastictransport.Interface) NewPutCalendarJob { // Add anomaly detection job to calendar. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job func New(tp elastictransport.Interface) *PutCalendarJob { r := &PutCalendarJob{ transport: tp, diff --git a/typedapi/ml/putcalendarjob/response.go b/typedapi/ml/putcalendarjob/response.go index 1bf53c4425..408d85f6c4 100644 --- a/typedapi/ml/putcalendarjob/response.go +++ b/typedapi/ml/putcalendarjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putcalendarjob @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package putcalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. diff --git a/typedapi/ml/putdatafeed/put_datafeed.go b/typedapi/ml/putdatafeed/put_datafeed.go index 185374a9c7..b25ddb3dde 100644 --- a/typedapi/ml/putdatafeed/put_datafeed.go +++ b/typedapi/ml/putdatafeed/put_datafeed.go @@ -16,15 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a datafeed. // Datafeeds retrieve data from Elasticsearch for analysis by an anomaly // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). -// If you are concerned about delayed data, you can add a delay (`query_delay`) +// If you are concerned about delayed data, you can add a delay (`query_delay`) // at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`.
+// // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. If you @@ -102,8 +105,11 @@ // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). -// If you are concerned about delayed data, you can add a delay (`query_delay`) +// If you are concerned about delayed data, you can add a delay (`query_delay`) // at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`. +// +// When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. If you @@ -114,7 +120,7 @@ // directly to the `.ml-config` index. Do not give users `write` privileges on // the `.ml-config` index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed func New(tp elastictransport.Interface) *PutDatafeed { r := &PutDatafeed{ transport: tp, @@ -122,8 +128,6 @@ func New(tp elastictransport.Interface) *PutDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -434,18 +438,39 @@ func (r *PutDatafeed) Pretty(pretty bool) *PutDatafeed { return r } -// Aggregations If set, the datafeed performs aggregation searches. +// If set, the datafeed performs aggregation searches. // Support for aggregations is limited and should be used only with low // cardinality data.
// API name: aggregations func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) *PutDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *PutDatafeed) AddAggregation(key string, value types.AggregationsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + r.req.Aggregations = tmp return r } -// ChunkingConfig Datafeeds might be required to search over long time periods, for several +// Datafeeds might be required to search over long time periods, for several // months or years. // This search is split into time chunks in order to ensure the load on // Elasticsearch is managed. @@ -453,14 +478,18 @@ func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) * // calculated; // it is an advanced configuration option. // API name: chunking_config -func (r *PutDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *PutDatafeed { +func (r *PutDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ChunkingConfig = chunkingconfig + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() return r } -// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// Specifies whether the datafeed checks for missing data and the size of the // window. 
// The datafeed can optionally search over indices that have already been read // in an effort to determine whether @@ -470,14 +499,18 @@ func (r *PutDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *PutD // has passed that moment in time. // This check runs only on real-time datafeeds. // API name: delayed_data_check_config -func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *PutDatafeed { +func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DelayedDataCheckConfig = delayeddatacheckconfig + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() return r } -// Frequency The interval at which scheduled queries are made while the datafeed runs in +// The interval at which scheduled queries are made while the datafeed runs in // real time. // The default value is either the bucket span for short bucket spans, or, for // longer bucket spans, a sensible @@ -488,47 +521,72 @@ func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.Delay // aggregations, this value must be divisible by the interval of the date // histogram aggregation. 
// API name: frequency -func (r *PutDatafeed) Frequency(duration types.Duration) *PutDatafeed { - r.req.Frequency = duration +func (r *PutDatafeed) Frequency(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } // API name: headers -func (r *PutDatafeed) Headers(httpheaders types.HttpHeaders) *PutDatafeed { - r.req.Headers = httpheaders +func (r *PutDatafeed) Headers(httpheaders types.HttpHeadersVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// Indices An array of index names. Wildcards are supported. If any of the indices are -// in remote clusters, the machine -// learning nodes must have the `remote_cluster_client` role. +// An array of index names. Wildcards are supported. If any of the indices are +// in remote clusters, the master +// nodes and the machine learning nodes must have the `remote_cluster_client` +// role. 
// API name: indices func (r *PutDatafeed) Indices(indices ...string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r } -// IndicesOptions Specifies index expansion options that are used during search +// Specifies index expansion options that are used during search // API name: indices_options -func (r *PutDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *PutDatafeed { +func (r *PutDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndicesOptions = indicesoptions + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() return r } -// JobId Identifier for the anomaly detection job. +// Identifier for the anomaly detection job. // API name: job_id func (r *PutDatafeed) JobId(id string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } -// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// If a real-time datafeed has never seen any data (including during any initial // training period), it automatically // stops and closes the associated job after this many real-time searches return // no documents. In other words, @@ -538,25 +596,34 @@ func (r *PutDatafeed) JobId(id string) *PutDatafeed { // default, it is not set. // API name: max_empty_searches func (r *PutDatafeed) MaxEmptySearches(maxemptysearches int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxEmptySearches = &maxemptysearches return r } -// Query The Elasticsearch query domain-specific language (DSL). This value +// The Elasticsearch query domain-specific language (DSL). 
This value // corresponds to the query object in an // Elasticsearch search POST body. All the options that are supported by // Elasticsearch can be used, as this // object is passed verbatim to Elasticsearch. // API name: query -func (r *PutDatafeed) Query(query *types.Query) *PutDatafeed { +func (r *PutDatafeed) Query(query types.QueryVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// QueryDelay The number of seconds behind real time that data is queried. For example, if +// The number of seconds behind real time that data is queried. For example, if // data from 10:04 a.m. might // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 // seconds. The default @@ -564,38 +631,74 @@ func (r *PutDatafeed) Query(query *types.Query) *PutDatafeed { // the query performance // when there are multiple jobs running on the same node. // API name: query_delay -func (r *PutDatafeed) QueryDelay(duration types.Duration) *PutDatafeed { - r.req.QueryDelay = duration +func (r *PutDatafeed) QueryDelay(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() return r } -// RuntimeMappings Specifies runtime fields for the datafeed search. +// Specifies runtime fields for the datafeed search. 
// API name: runtime_mappings -func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *PutDatafeed { - r.req.RuntimeMappings = runtimefields +func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. // The detector configuration objects in a job can contain functions that use // these script fields. // API name: script_fields func (r *PutDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *PutDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *PutDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } -// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// The size parameter that is used in Elasticsearch searches when the datafeed // does not use aggregations. // The maximum value is the value of `index.max_result_window`, which is 10,000 // by default. 
// API name: scroll_size func (r *PutDatafeed) ScrollSize(scrollsize int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollSize = &scrollsize return r diff --git a/typedapi/ml/putdatafeed/request.go b/typedapi/ml/putdatafeed/request.go index b0d87c2895..ab18a59853 100644 --- a/typedapi/ml/putdatafeed/request.go +++ b/typedapi/ml/putdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L173 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L184 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. @@ -71,8 +71,9 @@ type Request struct { Frequency types.Duration `json:"frequency,omitempty"` Headers types.HttpHeaders `json:"headers,omitempty"` // Indices An array of index names. Wildcards are supported. If any of the indices are - // in remote clusters, the machine - // learning nodes must have the `remote_cluster_client` role. + // in remote clusters, the master + // nodes and the machine learning nodes must have the `remote_cluster_client` + // role. 
Indices []string `json:"indices,omitempty"` // IndicesOptions Specifies index expansion options that are used during search IndicesOptions *types.IndicesOptions `json:"indices_options,omitempty"` @@ -151,7 +152,7 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": + case "aggregations", "aggs": if s.Aggregations == nil { s.Aggregations = make(map[string]types.Aggregations, 0) } diff --git a/typedapi/ml/putdatafeed/response.go b/typedapi/ml/putdatafeed/response.go index 915219bf21..733e16e84d 100644 --- a/typedapi/ml/putdatafeed/response.go +++ b/typedapi/ml/putdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` diff --git a/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go b/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go index 7229772115..208de34261 100644 --- a/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go +++ b/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. +// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. package putdataframeanalytics import ( @@ -86,8 +95,17 @@ func NewPutDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPutDataFrame // Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. +// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics func New(tp elastictransport.Interface) *PutDataFrameAnalytics { r := &PutDataFrameAnalytics{ transport: tp, @@ -95,8 +113,6 @@ func New(tp elastictransport.Interface) *PutDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,7 +383,7 @@ func (r *PutDataFrameAnalytics) Pretty(pretty bool) *PutDataFrameAnalytics { return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. If // set to `false` and a machine learning node with capacity to run the job // cannot be immediately found, the API returns an error. If set to `true`, @@ -377,23 +393,32 @@ func (r *PutDataFrameAnalytics) Pretty(pretty bool) *PutDataFrameAnalytics { // `xpack.ml.max_lazy_ml_nodes` setting. // API name: allow_lazy_start func (r *PutDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Analysis The analysis configuration, which contains the information necessary to +// The analysis configuration, which contains the information necessary to // perform one of the following types of analysis: classification, outlier // detection, or regression. 
// API name: analysis -func (r *PutDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = *analysis + r.req.Analysis = *analysis.DataframeAnalysisContainerCaster() return r } -// AnalyzedFields Specifies `includes` and/or `excludes` patterns to select which fields +// Specifies `includes` and/or `excludes` patterns to select which fields // will be included in the analysis. The patterns specified in `excludes` // are applied last, therefore `excludes` takes precedence. In other words, // if the same field is specified in both `includes` and `excludes`, then @@ -422,73 +447,120 @@ func (r *PutDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContai // values to a single number. For example, in case of age ranges, you can // model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. // API name: analyzed_fields -func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalyzedFields = analyzedfields + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() return r } -// Description A description of the job. +// A description of the job. 
// API name: description func (r *PutDataFrameAnalytics) Description(description string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination configuration. +// The destination configuration. // API name: dest -func (r *PutDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.DataframeAnalyticsDestinationCaster() return r } // API name: headers -func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeaders) *PutDataFrameAnalytics { - r.req.Headers = httpheaders +func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeadersVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. 
// API name: max_num_threads func (r *PutDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// API name: _meta +func (r *PutDataFrameAnalytics) Meta_(metadata types.MetadataVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try // to create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *PutDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit return r } -// Source The configuration of how to source the analysis data. +// The configuration of how to source the analysis data. 
// API name: source -func (r *PutDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.DataframeAnalyticsSourceCaster() return r } // API name: version func (r *PutDataFrameAnalytics) Version(versionstring string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionstring return r diff --git a/typedapi/ml/putdataframeanalytics/request.go b/typedapi/ml/putdataframeanalytics/request.go index cd997f79db..7fff6930cb 100644 --- a/typedapi/ml/putdataframeanalytics/request.go +++ b/typedapi/ml/putdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdataframeanalytics @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L155 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine @@ -87,7 +87,8 @@ type Request struct { // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. - MaxNumThreads *int `json:"max_num_threads,omitempty"` + MaxNumThreads *int `json:"max_num_threads,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for // analytical processing. 
If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try @@ -194,6 +195,11 @@ func (s *Request) UnmarshalJSON(data []byte) error { s.MaxNumThreads = &f } + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + case "model_memory_limit": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/ml/putdataframeanalytics/response.go b/typedapi/ml/putdataframeanalytics/response.go index 081ad175fa..330ea67767 100644 --- a/typedapi/ml/putdataframeanalytics/response.go +++ b/typedapi/ml/putdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L47 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` Analysis types.DataframeAnalysisContainer `json:"analysis"` @@ -37,6 +37,7 @@ type Response struct { Dest types.DataframeAnalyticsDestination `json:"dest"` Id string `json:"id"` MaxNumThreads int `json:"max_num_threads"` + Meta_ types.Metadata `json:"_meta,omitempty"` ModelMemoryLimit string `json:"model_memory_limit"` Source types.DataframeAnalyticsSource `json:"source"` Version string `json:"version"` diff --git 
a/typedapi/ml/putfilter/put_filter.go b/typedapi/ml/putfilter/put_filter.go index 6e9b0cfcaa..bf70bd5fc7 100644 --- a/typedapi/ml/putfilter/put_filter.go +++ b/typedapi/ml/putfilter/put_filter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a filter. // A filter contains a list of strings. It can be used by one or more anomaly @@ -91,7 +91,7 @@ func NewPutFilterFunc(tp elastictransport.Interface) NewPutFilter { // Specifically, filters are referenced in the `custom_rules` property of // detector configuration objects. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter func New(tp elastictransport.Interface) *PutFilter { r := &PutFilter{ transport: tp, @@ -99,8 +99,6 @@ func New(tp elastictransport.Interface) *PutFilter { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,21 +365,32 @@ func (r *PutFilter) Pretty(pretty bool) *PutFilter { return r } -// Description A description of the filter. +// A description of the filter. // API name: description func (r *PutFilter) Description(description string) *PutFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Items The items of the filter. A wildcard `*` can be used at the beginning or the +// The items of the filter. A wildcard `*` can be used at the beginning or the // end of an item. // Up to 10000 items are allowed in each filter. 
// API name: items func (r *PutFilter) Items(items ...string) *PutFilter { - r.req.Items = items + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range items { + r.req.Items = append(r.req.Items, v) + + } return r } diff --git a/typedapi/ml/putfilter/request.go b/typedapi/ml/putfilter/request.go index 589e84f039..d174e6dc58 100644 --- a/typedapi/ml/putfilter/request.go +++ b/typedapi/ml/putfilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putfilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L58 type Request struct { // Description A description of the filter. diff --git a/typedapi/ml/putfilter/response.go b/typedapi/ml/putfilter/response.go index 1b2b4cbbd2..7dba346e00 100644 --- a/typedapi/ml/putfilter/response.go +++ b/typedapi/ml/putfilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putfilter // Response holds the response body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` FilterId string `json:"filter_id"` diff --git a/typedapi/ml/putjob/put_job.go b/typedapi/ml/putjob/put_job.go index 4e1d4753d0..fb3398df18 100644 --- a/typedapi/ml/putjob/put_job.go +++ b/typedapi/ml/putjob/put_job.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create an anomaly detection job. +// // If you include a `datafeed_config`, you must have read index privileges on // the source index. +// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. package putjob import ( @@ -37,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -84,10 +88,13 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { } // Create an anomaly detection job. +// // If you include a `datafeed_config`, you must have read index privileges on // the source index. 
+// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job func New(tp elastictransport.Interface) *PutJob { r := &PutJob{ transport: tp, @@ -95,8 +102,6 @@ func New(tp elastictransport.Interface) *PutJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -321,6 +326,56 @@ func (r *PutJob) _jobid(jobid string) *PutJob { return r } +// AllowNoIndices If `true`, wildcard indices expressions that resolve into no concrete indices +// are ignored. This includes the +// `_all` string or when no indices are specified. +// API name: allow_no_indices +func (r *PutJob) AllowNoIndices(allownoindices bool) *PutJob { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines +// whether wildcard expressions match hidden data streams. Supports +// comma-separated values. Valid values are: +// +// * `all`: Match any data stream or index, including hidden ones. +// * `closed`: Match closed, non-hidden indices. Also matches any non-hidden +// data stream. Data streams cannot be closed. +// * `hidden`: Match hidden data streams and hidden indices. Must be combined +// with `open`, `closed`, or both. +// * `none`: Wildcard patterns are not accepted. +// * `open`: Match open, non-hidden indices. Also matches any non-hidden data +// stream. 
+// API name: expand_wildcards +func (r *PutJob) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutJob { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *PutJob) IgnoreThrottled(ignorethrottled bool) *PutJob { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `true`, unavailable indices (missing or closed) are ignored. +// API name: ignore_unavailable +func (r *PutJob) IgnoreUnavailable(ignoreunavailable bool) *PutJob { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -365,7 +420,7 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { return r } -// AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there +// Advanced configuration option. Specifies whether this job can open when there // is insufficient machine learning node capacity for it to be immediately // assigned to a node. By default, if a machine learning node with capacity to // run the job cannot immediately be found, the open anomaly detection jobs API @@ -375,110 +430,156 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { // opening state until sufficient machine learning node capacity is available. // API name: allow_lazy_open func (r *PutJob) AllowLazyOpen(allowlazyopen bool) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyOpen = &allowlazyopen return r } -// AnalysisConfig Specifies how to analyze the data. 
After you create a job, you cannot change +// Specifies how to analyze the data. After you create a job, you cannot change // the analysis configuration; all the properties are informational. // API name: analysis_config -func (r *PutJob) AnalysisConfig(analysisconfig *types.AnalysisConfig) *PutJob { +func (r *PutJob) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = *analysisconfig + r.req.AnalysisConfig = *analysisconfig.AnalysisConfigCaster() return r } -// AnalysisLimits Limits can be applied for the resources required to hold the mathematical +// Limits can be applied for the resources required to hold the mathematical // models in memory. These limits are approximate and can be set per job. They // do not control the memory used by other processes, for example the // Elasticsearch Java processes. // API name: analysis_limits -func (r *PutJob) AnalysisLimits(analysislimits *types.AnalysisLimits) *PutJob { +func (r *PutJob) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() return r } -// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence of +// Advanced configuration option. The time between each periodic persistence of // the model. The default value is a randomized value between 3 to 4 hours, // which avoids all jobs persisting at exactly the same time. The smallest // allowed value is 1 hour. For very large models (several GB), persistence // could take 10-20 minutes, so do not set the `background_persist_interval` // value too low. 
// API name: background_persist_interval -func (r *PutJob) BackgroundPersistInterval(duration types.Duration) *PutJob { - r.req.BackgroundPersistInterval = duration +func (r *PutJob) BackgroundPersistInterval(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() return r } -// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// Advanced configuration option. Contains custom meta data about the job. // API name: custom_settings func (r *PutJob) CustomSettings(customsettings json.RawMessage) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CustomSettings = customsettings return r } -// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies a period of time (in days) after // which only the first snapshot per day is retained. This period is relative to // the timestamp of the most recent snapshot for this job. Valid values range // from 0 to `model_snapshot_retention_days`. // API name: daily_model_snapshot_retention_after_days func (r *PutJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays return r } -// DataDescription Defines the format of the input data when you send data to the job by using +// Defines the format of the input data when you send data to the job by using // the post data API. Note that when configure a datafeed, these properties are // automatically set. 
When data is received via the post data API, it is not // stored in Elasticsearch. Only the results for anomaly detection are retained. // API name: data_description -func (r *PutJob) DataDescription(datadescription *types.DataDescription) *PutJob { +func (r *PutJob) DataDescription(datadescription types.DataDescriptionVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataDescription = *datadescription + r.req.DataDescription = *datadescription.DataDescriptionCaster() return r } -// DatafeedConfig Defines a datafeed for the anomaly detection job. If Elasticsearch security +// Defines a datafeed for the anomaly detection job. If Elasticsearch security // features are enabled, your datafeed remembers which roles the user who // created it had at the time of creation and runs the query using those same // roles. If you provide secondary authorization headers, those credentials are // used instead. // API name: datafeed_config -func (r *PutJob) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PutJob { +func (r *PutJob) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DatafeedConfig = datafeedconfig + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *PutJob) Description(description string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Groups A list of job groups. A job can belong to no groups or many. +// A list of job groups. A job can belong to no groups or many. 
// API name: groups func (r *PutJob) Groups(groups ...string) *PutJob { - r.req.Groups = groups + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + } return r } -// ModelPlotConfig This advanced configuration option stores model information along with the +// This advanced configuration option stores model information along with the // results. It provides a more detailed view into anomaly detection. If you // enable model plot it can add considerable overhead to the performance of the // system; it is not feasible for jobs with many entities. Model plot provides a @@ -488,47 +589,64 @@ func (r *PutJob) Groups(groups ...string) *PutJob { // the model plot. Model plot config can be configured when the job is created // or updated later. It must be disabled if performance issues are experienced. // API name: model_plot_config -func (r *PutJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *PutJob { +func (r *PutJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlotConfig = modelplotconfig + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() return r } -// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies the maximum period of time (in // days) that snapshots are retained. This period is relative to the timestamp // of the most recent snapshot for this job. By default, snapshots ten days // older than the newest snapshot are deleted. 
// API name: model_snapshot_retention_days func (r *PutJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays return r } -// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the score +// Advanced configuration option. The period over which adjustments to the score // are applied, as new data is seen. The default value is the longer of 30 days // or 100 bucket spans. // API name: renormalization_window_days func (r *PutJob) RenormalizationWindowDays(renormalizationwindowdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenormalizationWindowDays = &renormalizationwindowdays return r } -// ResultsIndexName A text string that affects the name of the machine learning results index. By +// A text string that affects the name of the machine learning results index. By // default, the job generates an index named `.ml-anomalies-shared`. // API name: results_index_name func (r *PutJob) ResultsIndexName(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ResultsIndexName = &indexname return r } -// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results are +// Advanced configuration option. The period of time (in days) that results are // retained. Age is calculated relative to the timestamp of the latest bucket // result. If this property has a non-null value, once per day at 00:30 (server // time), results that are the specified number of days older than the latest @@ -538,6 +656,10 @@ func (r *PutJob) ResultsIndexName(indexname string) *PutJob { // number of days as results. Annotations added by users are retained forever. 
// API name: results_retention_days func (r *PutJob) ResultsRetentionDays(resultsretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultsRetentionDays = &resultsretentiondays diff --git a/typedapi/ml/putjob/request.go b/typedapi/ml/putjob/request.go index fe95341003..f4da3bf95c 100644 --- a/typedapi/ml/putjob/request.go +++ b/typedapi/ml/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_job/MlPutJobRequest.ts#L30-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_job/MlPutJobRequest.ts#L30-L157 type Request struct { // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there @@ -83,6 +83,10 @@ type Request struct { Description *string `json:"description,omitempty"` // Groups A list of job groups. A job can belong to no groups or many. Groups []string `json:"groups,omitempty"` + // JobId The identifier for the anomaly detection job. This identifier can contain + // lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + // must start and end with alphanumeric characters. + JobId *string `json:"job_id,omitempty"` // ModelPlotConfig This advanced configuration option stores model information along with the // results. It provides a more detailed view into anomaly detection. 
If you // enable model plot it can add considerable overhead to the performance of the @@ -226,6 +230,11 @@ func (s *Request) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Groups", err) } + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + case "model_plot_config": if err := dec.Decode(&s.ModelPlotConfig); err != nil { return fmt.Errorf("%s | %w", "ModelPlotConfig", err) diff --git a/typedapi/ml/putjob/response.go b/typedapi/ml/putjob/response.go index 581091ff99..bd57f63893 100644 --- a/typedapi/ml/putjob/response.go +++ b/typedapi/ml/putjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putjob @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` diff --git a/typedapi/ml/puttrainedmodel/put_trained_model.go b/typedapi/ml/puttrainedmodel/put_trained_model.go index fdd968892b..691c970376 100644 --- a/typedapi/ml/puttrainedmodel/put_trained_model.go +++ b/typedapi/ml/puttrainedmodel/put_trained_model.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a trained model. // Enable you to supply a trained model that is not created by data frame @@ -88,7 +88,7 @@ func NewPutTrainedModelFunc(tp elastictransport.Interface) NewPutTrainedModel { // Enable you to supply a trained model that is not created by data frame // analytics. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model func New(tp elastictransport.Interface) *PutTrainedModel { r := &PutTrainedModel{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *PutTrainedModel { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -383,64 +381,84 @@ func (r *PutTrainedModel) Pretty(pretty bool) *PutTrainedModel { return r } -// CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the +// The compressed (GZipped and Base64 encoded) inference definition of the // model. If compressed_definition is specified, then definition cannot be // specified. // API name: compressed_definition func (r *PutTrainedModel) CompressedDefinition(compresseddefinition string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CompressedDefinition = &compresseddefinition return r } -// Definition The inference definition for the model. If definition is specified, then +// The inference definition for the model. If definition is specified, then // compressed_definition cannot be specified. 
// API name: definition -func (r *PutTrainedModel) Definition(definition *types.Definition) *PutTrainedModel { +func (r *PutTrainedModel) Definition(definition types.DefinitionVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Definition = definition + r.req.Definition = definition.DefinitionCaster() return r } -// Description A human-readable description of the inference trained model. +// A human-readable description of the inference trained model. // API name: description func (r *PutTrainedModel) Description(description string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// InferenceConfig The default configuration for inference. This can be either a regression +// The default configuration for inference. This can be either a regression // or classification configuration. It must match the underlying // definition.trained_model's target_type. For pre-packaged models such as // ELSER the config is not required. // API name: inference_config -func (r *PutTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigCreateContainer) *PutTrainedModel { +func (r *PutTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigCreateContainerVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.InferenceConfig = inferenceconfig + r.req.InferenceConfig = inferenceconfig.InferenceConfigCreateContainerCaster() return r } -// Input The input field names for the model definition. +// The input field names for the model definition. 
// API name: input -func (r *PutTrainedModel) Input(input *types.Input) *PutTrainedModel { +func (r *PutTrainedModel) Input(input types.InputVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Input = input + r.req.Input = input.InputCaster() return r } -// Metadata An object map that contains metadata about the model. +// An object map that contains metadata about the model. // API name: metadata -// -// metadata should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *PutTrainedModel) Metadata(metadata any) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := metadata.(type) { case json.RawMessage: r.req.Metadata = casted @@ -454,30 +472,36 @@ func (r *PutTrainedModel) Metadata(metadata any) *PutTrainedModel { return nil }) } - return r } -// ModelSizeBytes The estimated memory usage in bytes to keep the trained model in memory. +// The estimated memory usage in bytes to keep the trained model in memory. // This property is supported only if defer_definition_decompression is true // or the model definition is not supplied. // API name: model_size_bytes func (r *PutTrainedModel) ModelSizeBytes(modelsizebytes int64) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSizeBytes = &modelsizebytes return r } -// ModelType The model type. +// The model type. 
// API name: model_type func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelType = &modeltype - return r } -// PlatformArchitecture The platform architecture (if applicable) of the trained mode. If the model +// The platform architecture (if applicable) of the trained mode. If the model // only works on one platform, because it is heavily optimized for a particular // processor architecture and OS combination, then this field specifies which. // The format of the string must match the platform identifiers used by @@ -489,25 +513,40 @@ func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) // architecture or OS features), leave this field unset. // API name: platform_architecture func (r *PutTrainedModel) PlatformArchitecture(platformarchitecture string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PlatformArchitecture = &platformarchitecture return r } -// PrefixStrings Optional prefix strings applied at inference +// Optional prefix strings applied at inference // API name: prefix_strings -func (r *PutTrainedModel) PrefixStrings(prefixstrings *types.TrainedModelPrefixStrings) *PutTrainedModel { +func (r *PutTrainedModel) PrefixStrings(prefixstrings types.TrainedModelPrefixStringsVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PrefixStrings = prefixstrings + r.req.PrefixStrings = prefixstrings.TrainedModelPrefixStringsCaster() return r } -// Tags An array of tags to organize the model. +// An array of tags to organize the model. 
// API name: tags func (r *PutTrainedModel) Tags(tags ...string) *PutTrainedModel { - r.req.Tags = tags + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range tags { + r.req.Tags = append(r.req.Tags, v) + + } return r } diff --git a/typedapi/ml/puttrainedmodel/request.go b/typedapi/ml/puttrainedmodel/request.go index c360c6a8aa..6a4747c531 100644 --- a/typedapi/ml/puttrainedmodel/request.go +++ b/typedapi/ml/puttrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodel @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L135 type Request struct { // CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the diff --git a/typedapi/ml/puttrainedmodel/response.go b/typedapi/ml/puttrainedmodel/response.go index 16cb8c1874..51fcaf2eb9 100644 --- a/typedapi/ml/puttrainedmodel/response.go +++ b/typedapi/ml/puttrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodel @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 type Response struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` // CreateTime The time when the trained model was created. @@ -65,8 +65,9 @@ type Response struct { // created by data frame analytics contain analysis_config and input objects. Metadata *types.TrainedModelConfigMetadata `json:"metadata,omitempty"` // ModelId Identifier for the trained model. 
- ModelId string `json:"model_id"` - ModelSizeBytes types.ByteSize `json:"model_size_bytes,omitempty"` + ModelId string `json:"model_id"` + ModelPackage *types.ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes types.ByteSize `json:"model_size_bytes,omitempty"` // ModelType The model type ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` PrefixStrings *types.TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` @@ -231,6 +232,11 @@ func (s *Response) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelId", err) } + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return fmt.Errorf("%s | %w", "ModelPackage", err) + } + case "model_size_bytes": if err := dec.Decode(&s.ModelSizeBytes); err != nil { return fmt.Errorf("%s | %w", "ModelSizeBytes", err) diff --git a/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go b/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go index 929d53a77f..f69cdc3b58 100644 --- a/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go +++ b/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update a trained model alias. // A trained model alias is a logical name used to reference a single trained @@ -115,7 +115,7 @@ func NewPutTrainedModelAliasFunc(tp elastictransport.Interface) NewPutTrainedMod // common between the old and new trained models for the model alias, the API // returns a warning. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias func New(tp elastictransport.Interface) *PutTrainedModelAlias { r := &PutTrainedModelAlias{ transport: tp, diff --git a/typedapi/ml/puttrainedmodelalias/response.go b/typedapi/ml/puttrainedmodelalias/response.go index 95324a76e0..6597bbe88d 100644 --- a/typedapi/ml/puttrainedmodelalias/response.go +++ b/typedapi/ml/puttrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodelalias // Response holds the response body struct for the package puttrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go b/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go index 56dbbc92b9..23ba9c78c2 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create part of a trained model definition. package puttrainedmodeldefinitionpart @@ -88,7 +88,7 @@ func NewPutTrainedModelDefinitionPartFunc(tp elastictransport.Interface) NewPutT // Create part of a trained model definition. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { r := &PutTrainedModelDefinitionPart{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -384,27 +382,40 @@ func (r *PutTrainedModelDefinitionPart) Pretty(pretty bool) *PutTrainedModelDefi return r } -// Definition The definition part for the model. Must be a base64 encoded string. +// The definition part for the model. Must be a base64 encoded string. // API name: definition func (r *PutTrainedModelDefinitionPart) Definition(definition string) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Definition = definition return r } -// TotalDefinitionLength The total uncompressed definition length in bytes. Not base64 encoded. +// The total uncompressed definition length in bytes. Not base64 encoded. 
// API name: total_definition_length func (r *PutTrainedModelDefinitionPart) TotalDefinitionLength(totaldefinitionlength int64) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TotalDefinitionLength = totaldefinitionlength return r } -// TotalParts The total number of parts that will be uploaded. Must be greater than 0. +// The total number of parts that will be uploaded. Must be greater than 0. // API name: total_parts func (r *PutTrainedModelDefinitionPart) TotalParts(totalparts int) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TotalParts = totalparts return r diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/request.go b/typedapi/ml/puttrainedmodeldefinitionpart/request.go index fc44788183..f16502b32c 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/request.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodeldefinitionpart @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L65 type Request struct { // Definition The definition part for the model. Must be a base64 encoded string. diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/response.go b/typedapi/ml/puttrainedmodeldefinitionpart/response.go index 11faba6c3f..60bed5c161 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/response.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodeldefinitionpart // Response holds the response body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go b/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go index 37ced4bf83..741e19000f 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go +++ b/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a trained model vocabulary. // This API is supported only for natural language processing (NLP) models. @@ -89,7 +89,7 @@ func NewPutTrainedModelVocabularyFunc(tp elastictransport.Interface) NewPutTrain // The vocabulary is stored in the index as described in // `inference_config.*.vocabulary` of the trained model definition. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary func New(tp elastictransport.Interface) *PutTrainedModelVocabulary { r := &PutTrainedModelVocabulary{ transport: tp, @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *PutTrainedModelVocabulary { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,26 +365,47 @@ func (r *PutTrainedModelVocabulary) Pretty(pretty bool) *PutTrainedModelVocabula return r } -// Merges The optional model merges if required by the tokenizer. +// The optional model merges if required by the tokenizer. // API name: merges func (r *PutTrainedModelVocabulary) Merges(merges ...string) *PutTrainedModelVocabulary { - r.req.Merges = merges + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range merges { + + r.req.Merges = append(r.req.Merges, v) + } return r } -// Scores The optional vocabulary value scores if required by the tokenizer. +// The optional vocabulary value scores if required by the tokenizer. // API name: scores func (r *PutTrainedModelVocabulary) Scores(scores ...types.Float64) *PutTrainedModelVocabulary { - r.req.Scores = scores + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range scores { + r.req.Scores = append(r.req.Scores, v) + + } return r } -// Vocabulary The model vocabulary, which must not be empty. +// The model vocabulary, which must not be empty. 
// API name: vocabulary func (r *PutTrainedModelVocabulary) Vocabulary(vocabularies ...string) *PutTrainedModelVocabulary { - r.req.Vocabulary = vocabularies + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range vocabularies { + r.req.Vocabulary = append(r.req.Vocabulary, v) + + } return r } diff --git a/typedapi/ml/puttrainedmodelvocabulary/request.go b/typedapi/ml/puttrainedmodelvocabulary/request.go index c027003a20..329fd3e3f5 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/request.go +++ b/typedapi/ml/puttrainedmodelvocabulary/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodelvocabulary @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L68 type Request struct { // Merges The optional model merges if required by the tokenizer. diff --git a/typedapi/ml/puttrainedmodelvocabulary/response.go b/typedapi/ml/puttrainedmodelvocabulary/response.go index 7eb25b105b..9e0fca6e71 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/response.go +++ b/typedapi/ml/puttrainedmodelvocabulary/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttrainedmodelvocabulary // Response holds the response body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/resetjob/reset_job.go b/typedapi/ml/resetjob/reset_job.go index fe3fafa89d..e678433e74 100644 --- a/typedapi/ml/resetjob/reset_job.go +++ b/typedapi/ml/resetjob/reset_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Reset an anomaly detection job. // All model state and results are deleted. The job is ready to start over as if @@ -86,7 +86,7 @@ func NewResetJobFunc(tp elastictransport.Interface) NewResetJob { // It is not currently possible to reset multiple jobs using wildcards or a // comma separated list. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job func New(tp elastictransport.Interface) *ResetJob { r := &ResetJob{ transport: tp, diff --git a/typedapi/ml/resetjob/response.go b/typedapi/ml/resetjob/response.go index 6edd644f3c..3a89dd6c2e 100644 --- a/typedapi/ml/resetjob/response.go +++ b/typedapi/ml/resetjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resetjob // Response holds the response body struct for the package resetjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/revertmodelsnapshot/request.go b/typedapi/ml/revertmodelsnapshot/request.go index 31478f744f..94748f658f 100644 --- a/typedapi/ml/revertmodelsnapshot/request.go +++ b/typedapi/ml/revertmodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package revertmodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L77 type Request struct { // DeleteInterveningResults Refer to the description for the `delete_intervening_results` query diff --git a/typedapi/ml/revertmodelsnapshot/response.go b/typedapi/ml/revertmodelsnapshot/response.go index 0c01e52f70..a63d310a14 100644 --- a/typedapi/ml/revertmodelsnapshot/response.go +++ b/typedapi/ml/revertmodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package revertmodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 type Response struct { Model types.ModelSnapshot `json:"model"` } diff --git a/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go b/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go index d7931904c6..178d31fc33 100644 --- a/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go +++ b/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Revert to a snapshot. // The machine learning features react quickly to anomalous input, learning new @@ -102,7 +102,7 @@ func NewRevertModelSnapshotFunc(tp elastictransport.Interface) NewRevertModelSna // before this event. For example, you might consider reverting to a saved // snapshot after Black Friday or a critical system failure. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot func New(tp elastictransport.Interface) *RevertModelSnapshot { r := &RevertModelSnapshot{ transport: tp, @@ -110,8 +110,6 @@ func New(tp elastictransport.Interface) *RevertModelSnapshot { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -399,10 +397,15 @@ func (r *RevertModelSnapshot) Pretty(pretty bool) *RevertModelSnapshot { return r } -// DeleteInterveningResults Refer to the description for the `delete_intervening_results` query +// Refer to the description for the `delete_intervening_results` query // parameter. // API name: delete_intervening_results func (r *RevertModelSnapshot) DeleteInterveningResults(deleteinterveningresults bool) *RevertModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DeleteInterveningResults = &deleteinterveningresults return r diff --git a/typedapi/ml/setupgrademode/response.go b/typedapi/ml/setupgrademode/response.go index 19ac259e11..0651c8a422 100644 --- a/typedapi/ml/setupgrademode/response.go +++ b/typedapi/ml/setupgrademode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package setupgrademode // Response holds the response body struct for the package setupgrademode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/setupgrademode/set_upgrade_mode.go b/typedapi/ml/setupgrademode/set_upgrade_mode.go index b67d2281b2..20e48ebf24 100644 --- a/typedapi/ml/setupgrademode/set_upgrade_mode.go +++ b/typedapi/ml/setupgrademode/set_upgrade_mode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Set upgrade_mode for ML indices. // Sets a cluster wide upgrade_mode setting that prepares machine learning @@ -94,7 +94,7 @@ func NewSetUpgradeModeFunc(tp elastictransport.Interface) NewSetUpgradeMode { // You can see the current value for the upgrade_mode setting by using the get // machine learning info API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode func New(tp elastictransport.Interface) *SetUpgradeMode { r := &SetUpgradeMode{ transport: tp, diff --git a/typedapi/ml/startdatafeed/request.go b/typedapi/ml/startdatafeed/request.go index f5059722bd..c66d05ab11 100644 --- a/typedapi/ml/startdatafeed/request.go +++ b/typedapi/ml/startdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package startdatafeed @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L99 type Request struct { // End Refer to the description for the `end` query parameter. diff --git a/typedapi/ml/startdatafeed/response.go b/typedapi/ml/startdatafeed/response.go index 908164defb..3610335199 100644 --- a/typedapi/ml/startdatafeed/response.go +++ b/typedapi/ml/startdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package startdatafeed @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 type Response struct { // Node The ID of the node that the job was started on. In serverless this will be diff --git a/typedapi/ml/startdatafeed/start_datafeed.go b/typedapi/ml/startdatafeed/start_datafeed.go index 008149dcb5..9f858ea6d7 100644 --- a/typedapi/ml/startdatafeed/start_datafeed.go +++ b/typedapi/ml/startdatafeed/start_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Start datafeeds. // @@ -121,7 +121,7 @@ func NewStartDatafeedFunc(tp elastictransport.Interface) NewStartDatafeed { // authorization headers when you created or updated the datafeed, those // credentials are used instead. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed func New(tp elastictransport.Interface) *StartDatafeed { r := &StartDatafeed{ transport: tp, @@ -129,8 +129,6 @@ func New(tp elastictransport.Interface) *StartDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -403,26 +401,41 @@ func (r *StartDatafeed) Pretty(pretty bool) *StartDatafeed { return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *StartDatafeed) End(datetime types.DateTime) *StartDatafeed { - r.req.End = datetime +func (r *StartDatafeed) End(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *StartDatafeed) Start(datetime types.DateTime) *StartDatafeed { - r.req.Start = datetime +func (r *StartDatafeed) Start(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *StartDatafeed) Timeout(duration types.Duration) *StartDatafeed { - r.req.Timeout = duration +func (r *StartDatafeed) Timeout(duration types.DurationVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/startdataframeanalytics/response.go b/typedapi/ml/startdataframeanalytics/response.go index 20e4396745..c7a933e055 100644 --- a/typedapi/ml/startdataframeanalytics/response.go +++ b/typedapi/ml/startdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package startdataframeanalytics // Response holds the response body struct for the package startdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 type Response struct { Acknowledged bool `json:"acknowledged"` // Node The ID of the node that the job was started on. If the job is allowed to open diff --git a/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go b/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go index 33afb57398..ed1a1456b1 100644 --- a/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go +++ b/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Start a data frame analytics job. // A data frame analytics job can be started and stopped multiple times @@ -100,7 +100,7 @@ func NewStartDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStartDataF // If the destination index exists, it is used as is. You can therefore set up // the destination index in advance with custom settings and mappings. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics func New(tp elastictransport.Interface) *StartDataFrameAnalytics { r := &StartDataFrameAnalytics{ transport: tp, diff --git a/typedapi/ml/starttrainedmodeldeployment/request.go b/typedapi/ml/starttrainedmodeldeployment/request.go new file mode 100644 index 0000000000..80e84d9057 --- /dev/null +++ b/typedapi/ml/starttrainedmodeldeployment/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package starttrainedmodeldeployment + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package starttrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentRequest.ts#L30-L111 +type Request struct { + + // AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations + // is set based on the current load. + // If adaptive_allocations is enabled, do not set the number of allocations + // manually. + AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Starttrainedmodeldeployment request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/ml/starttrainedmodeldeployment/response.go b/typedapi/ml/starttrainedmodeldeployment/response.go index 01a488f504..8e18cd9995 100644 --- a/typedapi/ml/starttrainedmodeldeployment/response.go +++ b/typedapi/ml/starttrainedmodeldeployment/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package starttrainedmodeldeployment @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package starttrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 type Response struct { Assignment types.TrainedModelAssignment `json:"assignment"` } diff --git a/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go b/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go index b643586617..23fda88fe8 100644 --- a/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go +++ b/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go @@ -16,13 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Start a trained model deployment. // It allocates the model to every machine learning node. 
package starttrainedmodeldeployment import ( + gobytes "bytes" "context" "encoding/json" "errors" @@ -55,6 +56,10 @@ type StartTrainedModelDeployment struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int modelid string @@ -82,12 +87,14 @@ func NewStartTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStartT // Start a trained model deployment. // It allocates the model to every machine learning node. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment func New(tp elastictransport.Interface) *StartTrainedModelDeployment { r := &StartTrainedModelDeployment{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -99,6 +106,21 @@ func New(tp elastictransport.Interface) *StartTrainedModelDeployment { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StartTrainedModelDeployment) Raw(raw io.Reader) *StartTrainedModelDeployment { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StartTrainedModelDeployment) Request(req *Request) *StartTrainedModelDeployment { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *StartTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -108,6 +130,31 @@ func (r *StartTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Re var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StartTrainedModelDeployment: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -258,45 +305,6 @@ func (r StartTrainedModelDeployment) Do(providedCtx context.Context) (*Response, return nil, errorResponse } -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. -func (r StartTrainedModelDeployment) IsSuccess(providedCtx context.Context) (bool, error) { - var ctx context.Context - r.spanStarted = true - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "ml.start_trained_model_deployment") - defer instrument.Close(ctx) - } - if ctx == nil { - ctx = providedCtx - } - - res, err := r.Perform(ctx) - - if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err - } - - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil - } - - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the StartTrainedModelDeployment query execution, status code: %d", res.StatusCode) - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordError(ctx, err) - } - return false, err - } - - return false, nil -} - // Header set a key, value pair in the StartTrainedModelDeployment 
headers map. func (r *StartTrainedModelDeployment) Header(key, value string) *StartTrainedModelDeployment { r.headers.Set(key, value) @@ -341,6 +349,8 @@ func (r *StartTrainedModelDeployment) DeploymentId(deploymentid string) *StartTr // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. // API name: number_of_allocations func (r *StartTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *StartTrainedModelDeployment { r.values.Set("number_of_allocations", strconv.Itoa(numberofallocations)) @@ -442,3 +452,19 @@ func (r *StartTrainedModelDeployment) Pretty(pretty bool) *StartTrainedModelDepl return r } + +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *StartTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *StartTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} diff --git a/typedapi/ml/stopdatafeed/request.go b/typedapi/ml/stopdatafeed/request.go index 8c1395bfa0..5155e87823 100644 --- a/typedapi/ml/stopdatafeed/request.go +++ b/typedapi/ml/stopdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stopdatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L86 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. diff --git a/typedapi/ml/stopdatafeed/response.go b/typedapi/ml/stopdatafeed/response.go index 4709340e4c..59279ea28c 100644 --- a/typedapi/ml/stopdatafeed/response.go +++ b/typedapi/ml/stopdatafeed/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stopdatafeed // Response holds the response body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stopdatafeed/stop_datafeed.go b/typedapi/ml/stopdatafeed/stop_datafeed.go index 1fe7bd7b91..40c1efe8ca 100644 --- a/typedapi/ml/stopdatafeed/stop_datafeed.go +++ b/typedapi/ml/stopdatafeed/stop_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Stop datafeeds. // A datafeed that is stopped ceases to retrieve data from Elasticsearch. A @@ -89,7 +89,7 @@ func NewStopDatafeedFunc(tp elastictransport.Interface) NewStopDatafeed { // datafeed can be started and stopped // multiple times throughout its lifecycle. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed func New(tp elastictransport.Interface) *StopDatafeed { r := &StopDatafeed{ transport: tp, @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *StopDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -371,26 +369,41 @@ func (r *StopDatafeed) Pretty(pretty bool) *StopDatafeed { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *StopDatafeed) AllowNoMatch(allownomatch bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// Force Refer to the description for the `force` query parameter. +// Refer to the description for the `force` query parameter. // API name: force func (r *StopDatafeed) Force(force bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Force = &force return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *StopDatafeed) Timeout(duration types.Duration) *StopDatafeed { - r.req.Timeout = duration +func (r *StopDatafeed) Timeout(duration types.DurationVariant) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/stopdataframeanalytics/response.go b/typedapi/ml/stopdataframeanalytics/response.go index 7f20fbbca9..5b9c6ce898 100644 --- a/typedapi/ml/stopdataframeanalytics/response.go +++ b/typedapi/ml/stopdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stopdataframeanalytics // Response holds the response body struct for the package stopdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go b/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go index 628d45ef2d..9b478c54c3 100644 --- a/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go +++ b/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Stop data frame analytics jobs. // A data frame analytics job can be started and stopped multiple times @@ -82,7 +82,7 @@ func NewStopDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStopDataFra // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics func New(tp elastictransport.Interface) *StopDataFrameAnalytics { r := &StopDataFrameAnalytics{ transport: tp, diff --git a/typedapi/ml/stoptrainedmodeldeployment/response.go b/typedapi/ml/stoptrainedmodeldeployment/response.go index 0574d9a535..48a82515d9 100644 --- a/typedapi/ml/stoptrainedmodeldeployment/response.go +++ b/typedapi/ml/stoptrainedmodeldeployment/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stoptrainedmodeldeployment // Response holds the response body struct for the package stoptrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go b/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go index eb2c80d6b1..e0da7b6e2c 100644 --- a/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go +++ b/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Stop a trained model deployment. package stoptrainedmodeldeployment @@ -78,7 +78,7 @@ func NewStopTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStopTra // Stop a trained model deployment. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment func New(tp elastictransport.Interface) *StopTrainedModelDeployment { r := &StopTrainedModelDeployment{ transport: tp, diff --git a/typedapi/ml/updatedatafeed/request.go b/typedapi/ml/updatedatafeed/request.go index 17cbede3ab..31923700c7 100644 --- a/typedapi/ml/updatedatafeed/request.go +++ b/typedapi/ml/updatedatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatedatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L163 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L170 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations diff --git a/typedapi/ml/updatedatafeed/response.go b/typedapi/ml/updatedatafeed/response.go index 92882c24a2..65b4387cc4 100644 --- a/typedapi/ml/updatedatafeed/response.go +++ b/typedapi/ml/updatedatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatedatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` diff --git a/typedapi/ml/updatedatafeed/update_datafeed.go b/typedapi/ml/updatedatafeed/update_datafeed.go index 67b45e00a5..5691fb58a5 100644 --- a/typedapi/ml/updatedatafeed/update_datafeed.go +++ b/typedapi/ml/updatedatafeed/update_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a datafeed. // You must stop and start the datafeed for the changes to be applied. @@ -96,7 +96,7 @@ func NewUpdateDatafeedFunc(tp elastictransport.Interface) NewUpdateDatafeed { // provide secondary authorization headers, // those credentials are used instead. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed func New(tp elastictransport.Interface) *UpdateDatafeed { r := &UpdateDatafeed{ transport: tp, @@ -104,8 +104,6 @@ func New(tp elastictransport.Interface) *UpdateDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -427,31 +425,56 @@ func (r *UpdateDatafeed) Pretty(pretty bool) *UpdateDatafeed { return r } -// Aggregations If set, the datafeed performs aggregation searches. Support for aggregations +// If set, the datafeed performs aggregation searches. Support for aggregations // is limited and should be used only // with low cardinality data. // API name: aggregations func (r *UpdateDatafeed) Aggregations(aggregations map[string]types.Aggregations) *UpdateDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *UpdateDatafeed) AddAggregation(key string, value types.AggregationsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// ChunkingConfig Datafeeds might search over long time periods, for several months or years. +// Datafeeds might search over long time periods, for several months or years. // This search is split into time // chunks in order to ensure the load on Elasticsearch is managed. 
Chunking // configuration controls how the size of // these time chunks are calculated; it is an advanced configuration option. // API name: chunking_config -func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *UpdateDatafeed { +func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ChunkingConfig = chunkingconfig + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() return r } -// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// Specifies whether the datafeed checks for missing data and the size of the // window. The datafeed can optionally // search over indices that have already been read in an effort to determine // whether any data has subsequently been @@ -461,14 +484,18 @@ func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *U // This check runs only on real-time // datafeeds. // API name: delayed_data_check_config -func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *UpdateDatafeed { +func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DelayedDataCheckConfig = delayeddatacheckconfig + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() return r } -// Frequency The interval at which scheduled queries are made while the datafeed runs in +// The interval at which scheduled queries are made while the datafeed runs in // real time. 
The default value is // either the bucket span for short bucket spans, or, for longer bucket spans, a // sensible fraction of the bucket @@ -478,39 +505,60 @@ func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.De // datafeed uses aggregations, this value // must be divisible by the interval of the date histogram aggregation. // API name: frequency -func (r *UpdateDatafeed) Frequency(duration types.Duration) *UpdateDatafeed { - r.req.Frequency = duration +func (r *UpdateDatafeed) Frequency(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Indices An array of index names. Wildcards are supported. If any of the indices are +// An array of index names. Wildcards are supported. If any of the indices are // in remote clusters, the machine // learning nodes must have the `remote_cluster_client` role. // API name: indices func (r *UpdateDatafeed) Indices(indices ...string) *UpdateDatafeed { - r.req.Indices = indices + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, v) + } return r } -// IndicesOptions Specifies index expansion options that are used during search. +// Specifies index expansion options that are used during search. 
// API name: indices_options -func (r *UpdateDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *UpdateDatafeed { +func (r *UpdateDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndicesOptions = indicesoptions + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() return r } // API name: job_id func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } -// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// If a real-time datafeed has never seen any data (including during any initial // training period), it automatically // stops and closes the associated job after this many real-time searches return // no documents. In other words, @@ -520,12 +568,17 @@ func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { // default, it is not set. // API name: max_empty_searches func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxEmptySearches = &maxemptysearches return r } -// Query The Elasticsearch query domain-specific language (DSL). This value +// The Elasticsearch query domain-specific language (DSL). This value // corresponds to the query object in an // Elasticsearch search POST body. All the options that are supported by // Elasticsearch can be used, as this @@ -539,14 +592,18 @@ func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed // in parallel and close one // when you are satisfied with the results of the job. 
// API name: query -func (r *UpdateDatafeed) Query(query *types.Query) *UpdateDatafeed { +func (r *UpdateDatafeed) Query(query types.QueryVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// QueryDelay The number of seconds behind real time that data is queried. For example, if +// The number of seconds behind real time that data is queried. For example, if // data from 10:04 a.m. might // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 // seconds. The default @@ -554,37 +611,73 @@ func (r *UpdateDatafeed) Query(query *types.Query) *UpdateDatafeed { // the query performance // when there are multiple jobs running on the same node. // API name: query_delay -func (r *UpdateDatafeed) QueryDelay(duration types.Duration) *UpdateDatafeed { - r.req.QueryDelay = duration +func (r *UpdateDatafeed) QueryDelay(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() return r } -// RuntimeMappings Specifies runtime fields for the datafeed search. +// Specifies runtime fields for the datafeed search. // API name: runtime_mappings -func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *UpdateDatafeed { - r.req.RuntimeMappings = runtimefields +func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. 
// The detector configuration objects in a job can contain functions that use // these script fields. // API name: script_fields func (r *UpdateDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *UpdateDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *UpdateDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } -// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// The size parameter that is used in Elasticsearch searches when the datafeed // does not use aggregations. // The maximum value is the value of `index.max_result_window`. // API name: scroll_size func (r *UpdateDatafeed) ScrollSize(scrollsize int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollSize = &scrollsize return r diff --git a/typedapi/ml/updatedataframeanalytics/request.go b/typedapi/ml/updatedataframeanalytics/request.go index 506afe8044..88ee9aca34 100644 --- a/typedapi/ml/updatedataframeanalytics/request.go +++ b/typedapi/ml/updatedataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatedataframeanalytics @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L80 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/typedapi/ml/updatedataframeanalytics/response.go b/typedapi/ml/updatedataframeanalytics/response.go index 2745fe424a..0184867f55 100644 --- a/typedapi/ml/updatedataframeanalytics/response.go +++ b/typedapi/ml/updatedataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatedataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` Analysis types.DataframeAnalysisContainer `json:"analysis"` diff --git a/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go b/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go index 097e48b850..b3730f84a6 100644 --- a/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go +++ b/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a data frame analytics job. package updatedataframeanalytics @@ -83,7 +83,7 @@ func NewUpdateDataFrameAnalyticsFunc(tp elastictransport.Interface) NewUpdateDat // Update a data frame analytics job. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { r := &UpdateDataFrameAnalytics{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -365,42 +363,60 @@ func (r *UpdateDataFrameAnalytics) Pretty(pretty bool) *UpdateDataFrameAnalytics return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. // API name: allow_lazy_start func (r *UpdateDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *UpdateDataFrameAnalytics) Description(description string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. 
// API name: max_num_threads func (r *UpdateDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try // to create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *UpdateDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit diff --git a/typedapi/ml/updatefilter/request.go b/typedapi/ml/updatefilter/request.go index bb8c031936..fbd93dcefe 100644 --- a/typedapi/ml/updatefilter/request.go +++ b/typedapi/ml/updatefilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L60 type Request struct { // AddItems The items to add to the filter. diff --git a/typedapi/ml/updatefilter/response.go b/typedapi/ml/updatefilter/response.go index 468cd705eb..5bec77fdcb 100644 --- a/typedapi/ml/updatefilter/response.go +++ b/typedapi/ml/updatefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatefilter // Response holds the response body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` FilterId string `json:"filter_id"` diff --git a/typedapi/ml/updatefilter/update_filter.go b/typedapi/ml/updatefilter/update_filter.go index 46fe949914..fff02e49f2 100644 --- a/typedapi/ml/updatefilter/update_filter.go +++ b/typedapi/ml/updatefilter/update_filter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a filter. // Updates the description of a filter, adds items, or removes items from the @@ -87,7 +87,7 @@ func NewUpdateFilterFunc(tp elastictransport.Interface) NewUpdateFilter { // Updates the description of a filter, adds items, or removes items from the // list. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter func New(tp elastictransport.Interface) *UpdateFilter { r := &UpdateFilter{ transport: tp, @@ -95,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateFilter { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -365,27 +363,45 @@ func (r *UpdateFilter) Pretty(pretty bool) *UpdateFilter { return r } -// AddItems The items to add to the filter. +// The items to add to the filter. // API name: add_items func (r *UpdateFilter) AddItems(additems ...string) *UpdateFilter { - r.req.AddItems = additems + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range additems { + r.req.AddItems = append(r.req.AddItems, v) + + } return r } -// Description A description for the filter. +// A description for the filter. // API name: description func (r *UpdateFilter) Description(description string) *UpdateFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// RemoveItems The items to remove from the filter. +// The items to remove from the filter. // API name: remove_items func (r *UpdateFilter) RemoveItems(removeitems ...string) *UpdateFilter { - r.req.RemoveItems = removeitems + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range removeitems { + r.req.RemoveItems = append(r.req.RemoveItems, v) + + } return r } diff --git a/typedapi/ml/updatejob/request.go b/typedapi/ml/updatejob/request.go index 55d10610e0..6a463b8033 100644 --- a/typedapi/ml/updatejob/request.go +++ b/typedapi/ml/updatejob/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatejob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L147 type Request struct { // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when @@ -74,7 +74,7 @@ type Request struct { // Description A description of the job. Description *string `json:"description,omitempty"` // Detectors An array of detector update objects. - Detectors []types.Detector `json:"detectors,omitempty"` + Detectors []types.DetectorUpdate `json:"detectors,omitempty"` // Groups A list of job groups. A job can belong to no groups or many. Groups []string `json:"groups,omitempty"` ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"` diff --git a/typedapi/ml/updatejob/response.go b/typedapi/ml/updatejob/response.go index 582f558c5e..c9d03ef0d8 100644 --- a/typedapi/ml/updatejob/response.go +++ b/typedapi/ml/updatejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` diff --git a/typedapi/ml/updatejob/update_job.go b/typedapi/ml/updatejob/update_job.go index 33b4745153..09b0b1bd21 100644 --- a/typedapi/ml/updatejob/update_job.go +++ b/typedapi/ml/updatejob/update_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update an anomaly detection job. // Updates certain properties of an anomaly detection job. @@ -85,7 +85,7 @@ func NewUpdateJobFunc(tp elastictransport.Interface) NewUpdateJob { // Update an anomaly detection job. // Updates certain properties of an anomaly detection job. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job func New(tp elastictransport.Interface) *UpdateJob { r := &UpdateJob{ transport: tp, @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *UpdateJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,7 +361,7 @@ func (r *UpdateJob) Pretty(pretty bool) *UpdateJob { return r } -// AllowLazyOpen Advanced configuration option. Specifies whether this job can open when +// Advanced configuration option. Specifies whether this job can open when // there is insufficient machine learning node capacity for it to be // immediately assigned to a node. If `false` and a machine learning node // with capacity to run the job cannot immediately be found, the open @@ -374,20 +372,29 @@ func (r *UpdateJob) Pretty(pretty bool) *UpdateJob { // machine learning node capacity is available. // API name: allow_lazy_open func (r *UpdateJob) AllowLazyOpen(allowlazyopen bool) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyOpen = &allowlazyopen return r } // API name: analysis_limits -func (r *UpdateJob) AnalysisLimits(analysislimits *types.AnalysisMemoryLimit) *UpdateJob { +func (r *UpdateJob) AnalysisLimits(analysislimits types.AnalysisMemoryLimitVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisMemoryLimitCaster() return r } -// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence +// Advanced configuration option. The time between each periodic persistence // of the model. 
// The default value is a randomized value between 3 to 4 hours, which // avoids all jobs persisting at exactly the same time. The smallest allowed @@ -398,31 +405,64 @@ func (r *UpdateJob) AnalysisLimits(analysislimits *types.AnalysisMemoryLimit) *U // close the job, then reopen the job and restart the datafeed for the // changes to take effect. // API name: background_persist_interval -func (r *UpdateJob) BackgroundPersistInterval(duration types.Duration) *UpdateJob { - r.req.BackgroundPersistInterval = duration +func (r *UpdateJob) BackgroundPersistInterval(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() return r } // API name: categorization_filters func (r *UpdateJob) CategorizationFilters(categorizationfilters ...string) *UpdateJob { - r.req.CategorizationFilters = categorizationfilters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range categorizationfilters { + r.req.CategorizationFilters = append(r.req.CategorizationFilters, v) + + } return r } -// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// Advanced configuration option. Contains custom meta data about the job. // For example, it can contain custom URL information as shown in Adding // custom URLs to machine learning results. 
// API name: custom_settings func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *UpdateJob { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CustomSettings = customsettings + return r +} + +func (r *UpdateJob) AddCustomSetting(key string, value json.RawMessage) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.CustomSettings == nil { + r.req.CustomSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.CustomSettings + } + + tmp[key] = value + r.req.CustomSettings = tmp return r } -// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies a period of time (in days) // after which only the first snapshot per day is retained. This period is // relative to the timestamp of the most recent snapshot for this job. Valid @@ -431,84 +471,127 @@ func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *U // `model_snapshot_retention_days`. // API name: daily_model_snapshot_retention_after_days func (r *UpdateJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays return r } -// Description A description of the job. +// A description of the job. 
// API name: description func (r *UpdateJob) Description(description string) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Detectors An array of detector update objects. +// An array of detector update objects. // API name: detectors -func (r *UpdateJob) Detectors(detectors ...types.Detector) *UpdateJob { - r.req.Detectors = detectors +func (r *UpdateJob) Detectors(detectors ...types.DetectorUpdateVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range detectors { + r.req.Detectors = append(r.req.Detectors, *v.DetectorUpdateCaster()) + + } return r } -// Groups A list of job groups. A job can belong to no groups or many. +// A list of job groups. A job can belong to no groups or many. // API name: groups func (r *UpdateJob) Groups(groups ...string) *UpdateJob { - r.req.Groups = groups + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + } return r } // API name: model_plot_config -func (r *UpdateJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *UpdateJob { +func (r *UpdateJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlotConfig = modelplotconfig + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() return r } // API name: model_prune_window -func (r *UpdateJob) ModelPruneWindow(duration types.Duration) *UpdateJob { - r.req.ModelPruneWindow = duration +func (r *UpdateJob) ModelPruneWindow(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + 
r.req.ModelPruneWindow = *duration.DurationCaster() return r } -// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies the maximum period of time (in // days) that snapshots are retained. This period is relative to the // timestamp of the most recent snapshot for this job. // API name: model_snapshot_retention_days func (r *UpdateJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays return r } -// PerPartitionCategorization Settings related to how categorization interacts with partition fields. +// Settings related to how categorization interacts with partition fields. // API name: per_partition_categorization -func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization *types.PerPartitionCategorization) *UpdateJob { +func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization types.PerPartitionCategorizationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PerPartitionCategorization = perpartitioncategorization + r.req.PerPartitionCategorization = perpartitioncategorization.PerPartitionCategorizationCaster() return r } -// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the +// Advanced configuration option. The period over which adjustments to the // score are applied, as new data is seen. 
// API name: renormalization_window_days func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenormalizationWindowDays = &renormalizationwindowdays return r } -// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results +// Advanced configuration option. The period of time (in days) that results // are retained. Age is calculated relative to the timestamp of the latest // bucket result. If this property has a non-null value, once per day at // 00:30 (server time), results that are the specified number of days older @@ -516,6 +599,10 @@ func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) * // value is null, which means all results are retained. // API name: results_retention_days func (r *UpdateJob) ResultsRetentionDays(resultsretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultsRetentionDays = &resultsretentiondays diff --git a/typedapi/ml/updatemodelsnapshot/request.go b/typedapi/ml/updatemodelsnapshot/request.go index 02e9f5eea1..807350e077 100644 --- a/typedapi/ml/updatemodelsnapshot/request.go +++ b/typedapi/ml/updatemodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatemodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L63 type Request struct { // Description A description of the model snapshot. diff --git a/typedapi/ml/updatemodelsnapshot/response.go b/typedapi/ml/updatemodelsnapshot/response.go index 059aefef2d..e3d6984e3d 100644 --- a/typedapi/ml/updatemodelsnapshot/response.go +++ b/typedapi/ml/updatemodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatemodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` Model types.ModelSnapshot `json:"model"` diff --git a/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go b/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go index 2a888dcdc7..f7cb8a0b1e 100644 --- a/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go +++ b/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a snapshot. // Updates certain properties of a snapshot. @@ -90,7 +90,7 @@ func NewUpdateModelSnapshotFunc(tp elastictransport.Interface) NewUpdateModelSna // Update a snapshot. // Updates certain properties of a snapshot. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot func New(tp elastictransport.Interface) *UpdateModelSnapshot { r := &UpdateModelSnapshot{ transport: tp, @@ -98,8 +98,6 @@ func New(tp elastictransport.Interface) *UpdateModelSnapshot { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -385,20 +383,29 @@ func (r *UpdateModelSnapshot) Pretty(pretty bool) *UpdateModelSnapshot { return r } -// Description A description of the model snapshot. +// A description of the model snapshot. // API name: description func (r *UpdateModelSnapshot) Description(description string) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Retain If `true`, this snapshot will not be deleted during automatic cleanup of +// If `true`, this snapshot will not be deleted during automatic cleanup of // snapshots older than `model_snapshot_retention_days`. However, this // snapshot will be deleted when the job is deleted. // API name: retain func (r *UpdateModelSnapshot) Retain(retain bool) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Retain = &retain return r diff --git a/typedapi/ml/updatetrainedmodeldeployment/request.go b/typedapi/ml/updatetrainedmodeldeployment/request.go index 7cb285008b..9767107ee6 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/request.go +++ b/typedapi/ml/updatetrainedmodeldeployment/request.go @@ -16,20 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatetrainedmodeldeployment import ( "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package updatetrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L24-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L25-L78 type Request struct { + // AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations + // is set based on the current load. + // If adaptive_allocations is enabled, do not set the number of allocations + // manually. + AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // NumberOfAllocations The number of model allocations on each node where the model is deployed. // All allocations on a node share the same copy of the model in memory but use // a separate set of threads to evaluate the model. @@ -37,6 +44,8 @@ type Request struct { // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. + // If adaptive_allocations is enabled, do not set this value, because it’s + // automatically set. 
NumberOfAllocations *int `json:"number_of_allocations,omitempty"` } diff --git a/typedapi/ml/updatetrainedmodeldeployment/response.go b/typedapi/ml/updatetrainedmodeldeployment/response.go index d32695e1f6..430ee21adc 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/response.go +++ b/typedapi/ml/updatetrainedmodeldeployment/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatetrainedmodeldeployment @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatetrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26 type Response struct { Assignment types.TrainedModelAssignment `json:"assignment"` } diff --git a/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go b/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go index f00414296b..6013d62cc8 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go +++ b/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a trained model deployment. package updatetrainedmodeldeployment @@ -83,7 +83,7 @@ func NewUpdateTrainedModelDeploymentFunc(tp elastictransport.Interface) NewUpdat // Update a trained model deployment. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment func New(tp elastictransport.Interface) *UpdateTrainedModelDeployment { r := &UpdateTrainedModelDeployment{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateTrainedModelDeployment { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -364,15 +362,38 @@ func (r *UpdateTrainedModelDeployment) Pretty(pretty bool) *UpdateTrainedModelDe return r } -// NumberOfAllocations The number of model allocations on each node where the model is deployed. +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *UpdateTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} + +// The number of model allocations on each node where the model is deployed. 
// All allocations on a node share the same copy of the model in memory but use // a separate set of threads to evaluate the model. // Increasing this value generally increases the throughput. // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. // API name: number_of_allocations func (r *UpdateTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfAllocations = &numberofallocations return r diff --git a/typedapi/ml/upgradejobsnapshot/response.go b/typedapi/ml/upgradejobsnapshot/response.go index 095c0da6ff..fab6ea9bcf 100644 --- a/typedapi/ml/upgradejobsnapshot/response.go +++ b/typedapi/ml/upgradejobsnapshot/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package upgradejobsnapshot // Response holds the response body struct for the package upgradejobsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31 type Response struct { // Completed When true, this means the task is complete. When false, it is still running. 
diff --git a/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go b/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go index 71f37390dd..f0224bcc70 100644 --- a/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go +++ b/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Upgrade a snapshot. -// Upgrades an anomaly detection model snapshot to the latest major version. +// Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. @@ -91,7 +91,7 @@ func NewUpgradeJobSnapshotFunc(tp elastictransport.Interface) NewUpgradeJobSnaps } // Upgrade a snapshot. -// Upgrades an anomaly detection model snapshot to the latest major version. +// Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. @@ -101,7 +101,7 @@ func NewUpgradeJobSnapshotFunc(tp elastictransport.Interface) NewUpgradeJobSnaps // upgraded snapshot cannot be the current snapshot of the anomaly detection // job. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot func New(tp elastictransport.Interface) *UpgradeJobSnapshot { r := &UpgradeJobSnapshot{ transport: tp, diff --git a/typedapi/ml/validate/request.go b/typedapi/ml/validate/request.go index e04c557914..5e14407103 100644 --- a/typedapi/ml/validate/request.go +++ b/typedapi/ml/validate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/validate/MlValidateJobRequest.ts#L27-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/validate/MlValidateJobRequest.ts#L27-L52 type Request struct { AnalysisConfig *types.AnalysisConfig `json:"analysis_config,omitempty"` AnalysisLimits *types.AnalysisLimits `json:"analysis_limits,omitempty"` diff --git a/typedapi/ml/validate/response.go b/typedapi/ml/validate/response.go index 31149fb1a4..b9831c702d 100644 --- a/typedapi/ml/validate/response.go +++ b/typedapi/ml/validate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validate // Response holds the response body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/validate/validate.go b/typedapi/ml/validate/validate.go index 3e15ea20e6..8277b515bb 100644 --- a/typedapi/ml/validate/validate.go +++ b/typedapi/ml/validate/validate.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Validates an anomaly detection job. +// Validate an anomaly detection job. package validate import ( @@ -73,7 +73,7 @@ func NewValidateFunc(tp elastictransport.Interface) NewValidate { } } -// Validates an anomaly detection job. +// Validate an anomaly detection job. 
// // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html func New(tp elastictransport.Interface) *Validate { @@ -83,8 +83,6 @@ func New(tp elastictransport.Interface) *Validate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -339,31 +337,47 @@ func (r *Validate) Pretty(pretty bool) *Validate { } // API name: analysis_config -func (r *Validate) AnalysisConfig(analysisconfig *types.AnalysisConfig) *Validate { +func (r *Validate) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = analysisconfig + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() return r } // API name: analysis_limits -func (r *Validate) AnalysisLimits(analysislimits *types.AnalysisLimits) *Validate { +func (r *Validate) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() return r } // API name: data_description -func (r *Validate) DataDescription(datadescription *types.DataDescription) *Validate { +func (r *Validate) DataDescription(datadescription types.DataDescriptionVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataDescription = datadescription + r.req.DataDescription = datadescription.DataDescriptionCaster() return r } // API name: description func (r *Validate) Description(description string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -372,21 +386,35 @@ func (r *Validate) 
Description(description string) *Validate { // API name: job_id func (r *Validate) JobId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } // API name: model_plot -func (r *Validate) ModelPlot(modelplot *types.ModelPlotConfig) *Validate { +func (r *Validate) ModelPlot(modelplot types.ModelPlotConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlot = modelplot + r.req.ModelPlot = modelplot.ModelPlotConfigCaster() return r } // API name: model_snapshot_id func (r *Validate) ModelSnapshotId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ModelSnapshotId = &id return r @@ -394,6 +422,10 @@ func (r *Validate) ModelSnapshotId(id string) *Validate { // API name: model_snapshot_retention_days func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays @@ -402,6 +434,11 @@ func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) // API name: results_index_name func (r *Validate) ResultsIndexName(indexname string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ResultsIndexName = &indexname return r diff --git a/typedapi/ml/validatedetector/request.go b/typedapi/ml/validatedetector/request.go index 3f399c1e79..1cb3c93767 100644 --- a/typedapi/ml/validatedetector/request.go +++ b/typedapi/ml/validatedetector/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validatedetector @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package validatedetector // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L40 type Request = types.Detector // NewRequest returns a Request diff --git a/typedapi/ml/validatedetector/response.go b/typedapi/ml/validatedetector/response.go index fa1cb7c62c..a1e25ded96 100644 --- a/typedapi/ml/validatedetector/response.go +++ b/typedapi/ml/validatedetector/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package validatedetector // Response holds the response body struct for the package validatedetector // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/validatedetector/validate_detector.go b/typedapi/ml/validatedetector/validate_detector.go index cc5c5d22e6..c5dbb9deca 100644 --- a/typedapi/ml/validatedetector/validate_detector.go +++ b/typedapi/ml/validatedetector/validate_detector.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Validates an anomaly detection detector. +// Validate an anomaly detection job. package validatedetector import ( @@ -74,9 +74,9 @@ func NewValidateDetectorFunc(tp elastictransport.Interface) NewValidateDetector } } -// Validates an anomaly detection detector. +// Validate an anomaly detection job. // -// https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html +// https://www.elastic.co/docs/api/doc/elasticsearch func New(tp elastictransport.Interface) *ValidateDetector { r := &ValidateDetector{ transport: tp, @@ -84,8 +84,6 @@ func New(tp elastictransport.Interface) *ValidateDetector { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -341,99 +339,147 @@ func (r *ValidateDetector) Pretty(pretty bool) *ValidateDetector { return r } -// ByFieldName The field used to split the data. In particular, this property is used for +// The field used to split the data. In particular, this property is used for // analyzing the splits with respect to their own history. It is used for // finding unusual values in the context of the split. 
// API name: by_field_name func (r *ValidateDetector) ByFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ByFieldName = &field return r } -// CustomRules Custom rules enable you to customize the way detectors operate. For example, +// Custom rules enable you to customize the way detectors operate. For example, // a rule may dictate conditions under which results should be skipped. Kibana // refers to custom rules as job rules. // API name: custom_rules -func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRule) *ValidateDetector { - r.req.CustomRules = customrules +func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRuleVariant) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range customrules { + + r.req.CustomRules = append(r.req.CustomRules, *v.DetectionRuleCaster()) + } return r } -// DetectorDescription A description of the detector. +// A description of the detector. // API name: detector_description func (r *ValidateDetector) DetectorDescription(detectordescription string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DetectorDescription = &detectordescription return r } -// DetectorIndex A unique identifier for the detector. This identifier is based on the order +// A unique identifier for the detector. This identifier is based on the order // of the detectors in the `analysis_config`, starting at zero. If you specify a // value for this property, it is ignored. 
// API name: detector_index func (r *ValidateDetector) DetectorIndex(detectorindex int) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DetectorIndex = &detectorindex return r } -// ExcludeFrequent If set, frequent entities are excluded from influencing the anomaly results. +// If set, frequent entities are excluded from influencing the anomaly results. // Entities can be considered frequent over time or frequent in a population. If // you are working with both over and by fields, you can set `exclude_frequent` // to `all` for both fields, or to `by` or `over` for those specific fields. // API name: exclude_frequent func (r *ValidateDetector) ExcludeFrequent(excludefrequent excludefrequent.ExcludeFrequent) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ExcludeFrequent = &excludefrequent - return r } -// FieldName The field that the detector uses in the function. If you use an event rate +// The field that the detector uses in the function. If you use an event rate // function such as count or rare, do not specify this field. The `field_name` // cannot contain double quotes or backslashes. // API name: field_name func (r *ValidateDetector) FieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FieldName = &field return r } -// Function The analysis function that is used. For example, `count`, `rare`, `mean`, +// The analysis function that is used. For example, `count`, `rare`, `mean`, // `min`, `max`, or `sum`. 
// API name: function func (r *ValidateDetector) Function(function string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Function = &function return r } -// OverFieldName The field used to split the data. In particular, this property is used for +// The field used to split the data. In particular, this property is used for // analyzing the splits with respect to the history of all splits. It is used // for finding unusual values in the population of all splits. // API name: over_field_name func (r *ValidateDetector) OverFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.OverFieldName = &field return r } -// PartitionFieldName The field used to segment the analysis. When you use this property, you have +// The field used to segment the analysis. When you use this property, you have // completely independent baselines for each value of this field. // API name: partition_field_name func (r *ValidateDetector) PartitionFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PartitionFieldName = &field return r } -// UseNull Defines whether a new series is used as the null series when there is no +// Defines whether a new series is used as the null series when there is no // value for the by or partition fields. // API name: use_null func (r *ValidateDetector) UseNull(usenull bool) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.UseNull = &usenull return r diff --git a/typedapi/monitoring/bulk/bulk.go b/typedapi/monitoring/bulk/bulk.go index 110e6b33fe..f6903fb02f 100644 --- a/typedapi/monitoring/bulk/bulk.go +++ b/typedapi/monitoring/bulk/bulk.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Used by the monitoring features to send monitoring data. +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. package bulk import ( @@ -79,9 +80,10 @@ func NewBulkFunc(tp elastictransport.Interface) NewBulk { } } -// Used by the monitoring features to send monitoring data. +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html +// https://www.elastic.co/docs/api/doc/elasticsearch func New(tp elastictransport.Interface) *Bulk { r := &Bulk{ transport: tp, diff --git a/typedapi/monitoring/bulk/request.go b/typedapi/monitoring/bulk/request.go index e67ccf1214..75e4f066ab 100644 --- a/typedapi/monitoring/bulk/request.go +++ b/typedapi/monitoring/bulk/request.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulk // Request holds the request body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L73 type Request = []any diff --git a/typedapi/monitoring/bulk/response.go b/typedapi/monitoring/bulk/response.go index b1f7c2e219..ed850a8e4b 100644 --- a/typedapi/monitoring/bulk/response.go +++ b/typedapi/monitoring/bulk/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulk @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 type Response struct { Error *types.ErrorCause `json:"error,omitempty"` // Errors True if there is was an error diff --git a/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go b/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go index 428a3d0e4e..80d8145499 100644 --- 
a/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go +++ b/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// You can use this API to clear the archived repositories metering information -// in the cluster. +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. package clearrepositoriesmeteringarchive import ( @@ -82,10 +82,10 @@ func NewClearRepositoriesMeteringArchiveFunc(tp elastictransport.Interface) NewC } } -// You can use this API to clear the archived repositories metering information -// in the cluster. +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive func New(tp elastictransport.Interface) *ClearRepositoriesMeteringArchive { r := &ClearRepositoriesMeteringArchive{ transport: tp, @@ -304,8 +304,6 @@ func (r *ClearRepositoriesMeteringArchive) Header(key, value string) *ClearRepos } // NodeId Comma-separated list of node IDs or names used to limit returned information. -// All the nodes selective options are explained -// [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). 
// API Name: nodeid func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearRepositoriesMeteringArchive { r.paramSet |= nodeidMask @@ -314,9 +312,7 @@ func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearReposito return r } -// MaxArchiveVersion Specifies the maximum -// [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) -// to be cleared from the archive. +// MaxArchiveVersion Specifies the maximum `archive_version` to be cleared from the archive. // API Name: maxarchiveversion func (r *ClearRepositoriesMeteringArchive) _maxarchiveversion(maxarchiveversion string) *ClearRepositoriesMeteringArchive { r.paramSet |= maxarchiveversionMask diff --git a/typedapi/nodes/clearrepositoriesmeteringarchive/response.go b/typedapi/nodes/clearrepositoriesmeteringarchive/response.go index 7d8e51aa5e..63dc54c01b 100644 --- a/typedapi/nodes/clearrepositoriesmeteringarchive/response.go +++ b/typedapi/nodes/clearrepositoriesmeteringarchive/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearrepositoriesmeteringarchive @@ -26,11 +26,10 @@ import ( // Response holds the response body struct for the package clearrepositoriesmeteringarchive // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L37-L39 type Response struct { - // ClusterName Name of the cluster. Based on the [Cluster name - // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + // ClusterName Name of the cluster. Based on the `cluster.name` setting. ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node // filters. diff --git a/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go b/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go index fa5d9f95fc..70a2b961af 100644 --- a/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go +++ b/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// You can use the cluster repositories metering API to retrieve repositories -// metering information in a cluster. 
-// This API exposes monotonically non-decreasing counters and it’s expected that -// clients would durably store the -// information needed to compute aggregations over a period of time. -// Additionally, the information exposed by this -// API is volatile, meaning that it won’t be present after node restarts. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. package getrepositoriesmeteringinfo import ( @@ -82,15 +82,15 @@ func NewGetRepositoriesMeteringInfoFunc(tp elastictransport.Interface) NewGetRep } } -// You can use the cluster repositories metering API to retrieve repositories -// metering information in a cluster. -// This API exposes monotonically non-decreasing counters and it’s expected that -// clients would durably store the -// information needed to compute aggregations over a period of time. -// Additionally, the information exposed by this -// API is volatile, meaning that it won’t be present after node restarts. +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info func New(tp elastictransport.Interface) *GetRepositoriesMeteringInfo { r := &GetRepositoriesMeteringInfo{ transport: tp, diff --git a/typedapi/nodes/getrepositoriesmeteringinfo/response.go b/typedapi/nodes/getrepositoriesmeteringinfo/response.go index dea9e7bfa4..42bf376c1b 100644 --- a/typedapi/nodes/getrepositoriesmeteringinfo/response.go +++ b/typedapi/nodes/getrepositoriesmeteringinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrepositoriesmeteringinfo @@ -26,11 +26,10 @@ import ( // Response holds the response body struct for the package getrepositoriesmeteringinfo // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 type Response struct { - // ClusterName Name of the cluster. Based on the [Cluster name - // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + // ClusterName Name of the cluster. Based on the `cluster.name` setting. ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node // filters. 
diff --git a/typedapi/nodes/hotthreads/hot_threads.go b/typedapi/nodes/hotthreads/hot_threads.go index e83c6e9a0f..6bdcb3e0a4 100644 --- a/typedapi/nodes/hotthreads/hot_threads.go +++ b/typedapi/nodes/hotthreads/hot_threads.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// This API yields a breakdown of the hot threads on each selected node in the -// cluster. -// The output is plain text with a breakdown of each node’s top hot threads. +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. package hotthreads import ( @@ -77,11 +78,12 @@ func NewHotThreadsFunc(tp elastictransport.Interface) NewHotThreads { } } -// This API yields a breakdown of the hot threads on each selected node in the -// cluster. -// The output is plain text with a breakdown of each node’s top hot threads. +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads func New(tp elastictransport.Interface) *HotThreads { r := &HotThreads{ transport: tp, @@ -334,16 +336,6 @@ func (r *HotThreads) Snapshots(snapshots string) *HotThreads { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response -// is received before the timeout expires, the request fails and -// returns an error. 
-// API name: master_timeout -func (r *HotThreads) MasterTimeout(duration string) *HotThreads { - r.values.Set("master_timeout", duration) - - return r -} - // Threads Specifies the number of hot threads to provide information for. // API name: threads func (r *HotThreads) Threads(threads string) *HotThreads { diff --git a/typedapi/nodes/hotthreads/response.go b/typedapi/nodes/hotthreads/response.go index 894c7435a7..13d377a426 100644 --- a/typedapi/nodes/hotthreads/response.go +++ b/typedapi/nodes/hotthreads/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package hotthreads // Response holds the response body struct for the package hotthreads // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/nodes/info/info.go b/typedapi/nodes/info/info.go index 8f1d968bd3..99f36a9db4 100644 --- a/typedapi/nodes/info/info.go +++ b/typedapi/nodes/info/info.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster nodes information. +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. 
package info import ( @@ -77,9 +80,12 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Returns cluster nodes information. +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, @@ -349,15 +355,6 @@ func (r *Info) FlatSettings(flatsettings bool) *Info { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *Info) MasterTimeout(duration string) *Info { - r.values.Set("master_timeout", duration) - - return r -} - // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout diff --git a/typedapi/nodes/info/response.go b/typedapi/nodes/info/response.go index d7500f0dd5..26aa3ed105 100644 --- a/typedapi/nodes/info/response.go +++ b/typedapi/nodes/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/NodesInfoResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/NodesInfoResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go b/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go index 8c4f17218f..9627d45089 100644 --- a/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go +++ b/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Reloads the keystore on nodes in the cluster. +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. 
+// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. package reloadsecuresettings import ( @@ -79,9 +96,26 @@ func NewReloadSecureSettingsFunc(tp elastictransport.Interface) NewReloadSecureS } } -// Reloads the keystore on nodes in the cluster. +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. +// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings func New(tp elastictransport.Interface) *ReloadSecureSettings { r := &ReloadSecureSettings{ transport: tp, @@ -89,8 +123,6 @@ func New(tp elastictransport.Interface) *ReloadSecureSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,9 +406,14 @@ func (r *ReloadSecureSettings) Pretty(pretty bool) *ReloadSecureSettings { return r } -// SecureSettingsPassword The password for the Elasticsearch keystore. +// The password for the Elasticsearch keystore. // API name: secure_settings_password func (r *ReloadSecureSettings) SecureSettingsPassword(password string) *ReloadSecureSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SecureSettingsPassword = &password return r diff --git a/typedapi/nodes/reloadsecuresettings/request.go b/typedapi/nodes/reloadsecuresettings/request.go index 7fdf221c41..dcd681998a 100644 --- a/typedapi/nodes/reloadsecuresettings/request.go +++ b/typedapi/nodes/reloadsecuresettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reloadsecuresettings @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L70 type Request struct { // SecureSettingsPassword The password for the Elasticsearch keystore. diff --git a/typedapi/nodes/reloadsecuresettings/response.go b/typedapi/nodes/reloadsecuresettings/response.go index 51817717c2..5d54824761 100644 --- a/typedapi/nodes/reloadsecuresettings/response.go +++ b/typedapi/nodes/reloadsecuresettings/response.go @@ -16,23 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package reloadsecuresettings import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node @@ -48,40 +42,3 @@ func NewResponse() *Response { } return r } - -func (s *Response) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "cluster_name": - if err := dec.Decode(&s.ClusterName); err != nil { - return fmt.Errorf("%s | %w", "ClusterName", err) - } - - case "_nodes": - if err := dec.Decode(&s.NodeStats); err != nil { - return fmt.Errorf("%s | %w", "NodeStats", err) - } - - case "nodes": - if s.Nodes == nil { - s.Nodes = make(map[string]types.NodeReloadResult, 0) - } - if err := dec.Decode(&s.Nodes); err != nil { - return fmt.Errorf("%s | %w", "Nodes", err) - } - - } - } - return nil -} diff --git a/typedapi/nodes/stats/response.go b/typedapi/nodes/stats/response.go index 0e965485ae..6416ca74fe 100644 --- a/typedapi/nodes/stats/response.go +++ b/typedapi/nodes/stats/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 type Response struct { ClusterName *string `json:"cluster_name,omitempty"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/stats/stats.go b/typedapi/nodes/stats/stats.go index bcffc0b126..2e10cff014 100644 --- a/typedapi/nodes/stats/stats.go +++ b/typedapi/nodes/stats/stats.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns cluster nodes statistics. +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. package stats import ( @@ -81,9 +84,12 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns cluster nodes statistics. +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, @@ -459,15 +465,6 @@ func (r *Stats) Level(level level.Level) *Stats { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *Stats) MasterTimeout(duration string) *Stats { - r.values.Set("master_timeout", duration) - - return r -} - // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout diff --git a/typedapi/nodes/usage/response.go b/typedapi/nodes/usage/response.go index eb4b9fdee4..8d2de367d3 100644 --- a/typedapi/nodes/usage/response.go +++ b/typedapi/nodes/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package usage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/usage/usage.go b/typedapi/nodes/usage/usage.go index 5e482123e1..482fd42c07 100644 --- a/typedapi/nodes/usage/usage.go +++ b/typedapi/nodes/usage/usage.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information on the usage of features. +// Get feature usage information. package usage import ( @@ -77,9 +77,9 @@ func NewUsageFunc(tp elastictransport.Interface) NewUsage { } } -// Returns information on the usage of features. +// Get feature usage information. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage func New(tp elastictransport.Interface) *Usage { r := &Usage{ transport: tp, diff --git a/typedapi/profiling/flamegraph/flamegraph.go b/typedapi/profiling/flamegraph/flamegraph.go index fd1dc94a63..77b3a3eb39 100644 --- a/typedapi/profiling/flamegraph/flamegraph.go +++ b/typedapi/profiling/flamegraph/flamegraph.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Extracts a UI-optimized structure to render flamegraphs from Universal // Profiling. diff --git a/typedapi/profiling/stacktraces/stacktraces.go b/typedapi/profiling/stacktraces/stacktraces.go index e11ce0a47a..bb7b973e34 100644 --- a/typedapi/profiling/stacktraces/stacktraces.go +++ b/typedapi/profiling/stacktraces/stacktraces.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Extracts raw stacktrace information from Universal Profiling. package stacktraces diff --git a/typedapi/profiling/status/status.go b/typedapi/profiling/status/status.go index 09e6839212..d706d856d7 100644 --- a/typedapi/profiling/status/status.go +++ b/typedapi/profiling/status/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Returns basic information about the status of Universal Profiling. package status diff --git a/typedapi/profiling/topnfunctions/topn_functions.go b/typedapi/profiling/topnfunctions/topn_functions.go index 655aa7ffd7..86174fe21f 100644 --- a/typedapi/profiling/topnfunctions/topn_functions.go +++ b/typedapi/profiling/topnfunctions/topn_functions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Extracts a list of topN functions from Universal Profiling. package topnfunctions diff --git a/typedapi/queryrules/deleterule/delete_rule.go b/typedapi/queryrules/deleterule/delete_rule.go index 1c2d588c93..800482a577 100644 --- a/typedapi/queryrules/deleterule/delete_rule.go +++ b/typedapi/queryrules/deleterule/delete_rule.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a query rule within a query ruleset. +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. package deleterule import ( @@ -81,9 +84,12 @@ func NewDeleteRuleFunc(tp elastictransport.Interface) NewDeleteRule { } } -// Deletes a query rule within a query ruleset. +// Delete a query rule. 
+// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule func New(tp elastictransport.Interface) *DeleteRule { r := &DeleteRule{ transport: tp, diff --git a/typedapi/queryrules/deleterule/response.go b/typedapi/queryrules/deleterule/response.go index 26bdbefa6b..ac8c7dbfd1 100644 --- a/typedapi/queryrules/deleterule/response.go +++ b/typedapi/queryrules/deleterule/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleterule // Response holds the response body struct for the package deleterule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/queryrules/deleteruleset/delete_ruleset.go b/typedapi/queryrules/deleteruleset/delete_ruleset.go index 961031ff0e..2b5e3283ad 100644 --- a/typedapi/queryrules/deleteruleset/delete_ruleset.go +++ b/typedapi/queryrules/deleteruleset/delete_ruleset.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a query ruleset. +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. package deleteruleset import ( @@ -76,9 +78,11 @@ func NewDeleteRulesetFunc(tp elastictransport.Interface) NewDeleteRuleset { } } -// Deletes a query ruleset. +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-ruleset.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset func New(tp elastictransport.Interface) *DeleteRuleset { r := &DeleteRuleset{ transport: tp, diff --git a/typedapi/queryrules/deleteruleset/response.go b/typedapi/queryrules/deleteruleset/response.go index dd8394ece4..ad0b26874f 100644 --- a/typedapi/queryrules/deleteruleset/response.go +++ b/typedapi/queryrules/deleteruleset/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteruleset // Response holds the response body struct for the package deleteruleset // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/queryrules/getrule/get_rule.go b/typedapi/queryrules/getrule/get_rule.go index f5e381f8d0..bfaed1ca5c 100644 --- a/typedapi/queryrules/getrule/get_rule.go +++ b/typedapi/queryrules/getrule/get_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the details about a query rule within a query ruleset +// Get a query rule. +// Get details about a query rule within a query ruleset. package getrule import ( @@ -81,9 +82,10 @@ func NewGetRuleFunc(tp elastictransport.Interface) NewGetRule { } } -// Returns the details about a query rule within a query ruleset +// Get a query rule. +// Get details about a query rule within a query ruleset. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule func New(tp elastictransport.Interface) *GetRule { r := &GetRule{ transport: tp, diff --git a/typedapi/queryrules/getrule/response.go b/typedapi/queryrules/getrule/response.go index 3b3353f39a..710b0b4836 100644 --- a/typedapi/queryrules/getrule/response.go +++ b/typedapi/queryrules/getrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrule @@ -27,13 +27,24 @@ import ( // Response holds the response body struct for the package getrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/get_rule/QueryRuleGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/get_rule/QueryRuleGetResponse.ts#L22-L24 type Response struct { - Actions types.QueryRuleActions `json:"actions"` - Criteria []types.QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - RuleId string `json:"rule_id"` - Type queryruletype.QueryRuleType `json:"type"` + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. 
+ Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. + Type queryruletype.QueryRuleType `json:"type"` } // NewResponse returns a Response diff --git a/typedapi/queryrules/getruleset/get_ruleset.go b/typedapi/queryrules/getruleset/get_ruleset.go index d7c9e2226b..2f2186741d 100644 --- a/typedapi/queryrules/getruleset/get_ruleset.go +++ b/typedapi/queryrules/getruleset/get_ruleset.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the details about a query ruleset +// Get a query ruleset. +// Get details about a query ruleset. package getruleset import ( @@ -76,9 +77,10 @@ func NewGetRulesetFunc(tp elastictransport.Interface) NewGetRuleset { } } -// Returns the details about a query ruleset +// Get a query ruleset. +// Get details about a query ruleset. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-ruleset.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset func New(tp elastictransport.Interface) *GetRuleset { r := &GetRuleset{ transport: tp, diff --git a/typedapi/queryrules/getruleset/response.go b/typedapi/queryrules/getruleset/response.go index d94dd8026f..28fe14d2e4 100644 --- a/typedapi/queryrules/getruleset/response.go +++ b/typedapi/queryrules/getruleset/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getruleset @@ -26,12 +26,12 @@ import ( // Response holds the response body struct for the package getruleset // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L24 type Response struct { - // Rules Rules associated with the query ruleset + // Rules Rules associated with the query ruleset. Rules []types.QueryRule `json:"rules"` - // RulesetId Query Ruleset unique identifier + // RulesetId A unique identifier for the ruleset. RulesetId string `json:"ruleset_id"` } diff --git a/typedapi/queryrules/listrulesets/list_rulesets.go b/typedapi/queryrules/listrulesets/list_rulesets.go index 764e0fc697..0a931071db 100644 --- a/typedapi/queryrules/listrulesets/list_rulesets.go +++ b/typedapi/queryrules/listrulesets/list_rulesets.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns summarized information about existing query rulesets. +// Get all query rulesets. +// Get summarized information about the query rulesets. package listrulesets import ( @@ -68,9 +69,10 @@ func NewListRulesetsFunc(tp elastictransport.Interface) NewListRulesets { } } -// Returns summarized information about existing query rulesets. +// Get all query rulesets. 
+// Get summarized information about the query rulesets. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-query-rulesets.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets func New(tp elastictransport.Interface) *ListRulesets { r := &ListRulesets{ transport: tp, @@ -274,7 +276,7 @@ func (r *ListRulesets) Header(key, value string) *ListRulesets { return r } -// From Starting offset (default: 0) +// From The offset from the first result to fetch. // API name: from func (r *ListRulesets) From(from int) *ListRulesets { r.values.Set("from", strconv.Itoa(from)) @@ -282,7 +284,7 @@ func (r *ListRulesets) From(from int) *ListRulesets { return r } -// Size specifies a max number of results to get +// Size The maximum number of results to retrieve. // API name: size func (r *ListRulesets) Size(size int) *ListRulesets { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/queryrules/listrulesets/response.go b/typedapi/queryrules/listrulesets/response.go index 80af272813..96d66006a2 100644 --- a/typedapi/queryrules/listrulesets/response.go +++ b/typedapi/queryrules/listrulesets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package listrulesets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package listrulesets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.QueryRulesetListItem `json:"results"` diff --git a/typedapi/queryrules/putrule/put_rule.go b/typedapi/queryrules/putrule/put_rule.go index a30bfbe871..3a6d887044 100644 --- a/typedapi/queryrules/putrule/put_rule.go +++ b/typedapi/queryrules/putrule/put_rule.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a query rule within a query ruleset. +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. 
package putrule import ( @@ -87,9 +95,17 @@ func NewPutRuleFunc(tp elastictransport.Interface) NewPutRule { } } -// Creates or updates a query rule within a query ruleset. +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule func New(tp elastictransport.Interface) *PutRule { r := &PutRule{ transport: tp, @@ -97,8 +113,6 @@ func New(tp elastictransport.Interface) *PutRule { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -319,7 +333,7 @@ func (r *PutRule) Header(key, value string) *PutRule { } // RulesetId The unique identifier of the query ruleset containing the rule to be created -// or updated +// or updated. // API Name: rulesetid func (r *PutRule) _rulesetid(rulesetid string) *PutRule { r.paramSet |= rulesetidMask @@ -329,7 +343,7 @@ func (r *PutRule) _rulesetid(rulesetid string) *PutRule { } // RuleId The unique identifier of the query rule within the specified ruleset to be -// created or updated +// created or updated. // API Name: ruleid func (r *PutRule) _ruleid(ruleid string) *PutRule { r.paramSet |= ruleidMask @@ -382,31 +396,56 @@ func (r *PutRule) Pretty(pretty bool) *PutRule { return r } +// The actions to take when the rule is matched. 
+// The format of this action depends on the rule type. // API name: actions -func (r *PutRule) Actions(actions *types.QueryRuleActions) *PutRule { +func (r *PutRule) Actions(actions types.QueryRuleActionsVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Actions = *actions + r.req.Actions = *actions.QueryRuleActionsCaster() return r } +// The criteria that must be met for the rule to be applied. +// If multiple criteria are specified for a rule, all criteria must be met for +// the rule to be applied. // API name: criteria -func (r *PutRule) Criteria(criteria ...types.QueryRuleCriteria) *PutRule { - r.req.Criteria = criteria +func (r *PutRule) Criteria(criteria ...types.QueryRuleCriteriaVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Criteria = make([]types.QueryRuleCriteria, len(criteria)) + for i, v := range criteria { + r.req.Criteria[i] = *v.QueryRuleCriteriaCaster() + } return r } // API name: priority func (r *PutRule) Priority(priority int) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Priority = &priority return r } +// The type of rule. // API name: type func (r *PutRule) Type(type_ queryruletype.QueryRuleType) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Type = type_ - return r } diff --git a/typedapi/queryrules/putrule/request.go b/typedapi/queryrules/putrule/request.go index e3cf75f217..c3b77e405a 100644 --- a/typedapi/queryrules/putrule/request.go +++ b/typedapi/queryrules/putrule/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrule @@ -34,12 +34,19 @@ import ( // Request holds the request body struct for the package putrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/put_rule/QueryRulePutRequest.ts#L28-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/put_rule/QueryRulePutRequest.ts#L28-L79 type Request struct { - Actions types.QueryRuleActions `json:"actions"` - Criteria []types.QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - Type queryruletype.QueryRuleType `json:"type"` + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // Type The type of rule. + Type queryruletype.QueryRuleType `json:"type"` } // NewRequest returns a Request diff --git a/typedapi/queryrules/putrule/response.go b/typedapi/queryrules/putrule/response.go index 26b48fdc23..d2cfd2bd75 100644 --- a/typedapi/queryrules/putrule/response.go +++ b/typedapi/queryrules/putrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrule @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/put_rule/QueryRulePutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/put_rule/QueryRulePutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/queryrules/putruleset/put_ruleset.go b/typedapi/queryrules/putruleset/put_ruleset.go index ab73a5f0e6..38cd116782 100644 --- a/typedapi/queryrules/putruleset/put_ruleset.go +++ b/typedapi/queryrules/putruleset/put_ruleset.go @@ -16,9 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a query ruleset. +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. 
package putruleset import ( @@ -81,9 +91,19 @@ func NewPutRulesetFunc(tp elastictransport.Interface) NewPutRuleset { } } -// Creates or updates a query ruleset. +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-ruleset.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset func New(tp elastictransport.Interface) *PutRuleset { r := &PutRuleset{ transport: tp, @@ -91,8 +111,6 @@ func New(tp elastictransport.Interface) *PutRuleset { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -304,7 +322,7 @@ func (r *PutRuleset) Header(key, value string) *PutRuleset { return r } -// RulesetId The unique identifier of the query ruleset to be created or updated +// RulesetId The unique identifier of the query ruleset to be created or updated. 
// API Name: rulesetid func (r *PutRuleset) _rulesetid(rulesetid string) *PutRuleset { r.paramSet |= rulesetidMask @@ -358,8 +376,15 @@ func (r *PutRuleset) Pretty(pretty bool) *PutRuleset { } // API name: rules -func (r *PutRuleset) Rules(rules ...types.QueryRule) *PutRuleset { - r.req.Rules = rules +func (r *PutRuleset) Rules(rules ...types.QueryRuleVariant) *PutRuleset { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rules = make([]types.QueryRule, len(rules)) + for i, v := range rules { + r.req.Rules[i] = *v.QueryRuleCaster() + } return r } diff --git a/typedapi/queryrules/putruleset/request.go b/typedapi/queryrules/putruleset/request.go index cad490dee6..b0d748f23d 100644 --- a/typedapi/queryrules/putruleset/request.go +++ b/typedapi/queryrules/putruleset/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putruleset @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package putruleset // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L59 type Request struct { Rules []types.QueryRule `json:"rules"` } diff --git a/typedapi/queryrules/putruleset/response.go b/typedapi/queryrules/putruleset/response.go index eeeff03925..850f7998a0 100644 --- a/typedapi/queryrules/putruleset/response.go +++ b/typedapi/queryrules/putruleset/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putruleset @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putruleset // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/queryrules/test/request.go b/typedapi/queryrules/test/request.go index 2cf32dfd7f..ff7a10688e 100644 --- a/typedapi/queryrules/test/request.go +++ b/typedapi/queryrules/test/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package test @@ -27,8 +27,12 @@ import ( // Request holds the request body struct for the package test // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/test/QueryRulesetTestRequest.ts#L24-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/test/QueryRulesetTestRequest.ts#L24-L57 type Request struct { + + // MatchCriteria The match criteria to apply to rules in the given query ruleset. + // Match criteria should match the keys defined in the `criteria.metadata` field + // of the rule. 
MatchCriteria map[string]json.RawMessage `json:"match_criteria"` } diff --git a/typedapi/queryrules/test/response.go b/typedapi/queryrules/test/response.go index f9ac0ecefb..0b7a948ee2 100644 --- a/typedapi/queryrules/test/response.go +++ b/typedapi/queryrules/test/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package test @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package test // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/test/QueryRulesetTestResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/test/QueryRulesetTestResponse.ts#L23-L28 type Response struct { MatchedRules []types.QueryRulesetMatchedRule `json:"matched_rules"` TotalMatchedRules int `json:"total_matched_rules"` diff --git a/typedapi/queryrules/test/test.go b/typedapi/queryrules/test/test.go index 96aa08fcf0..fcc89d196e 100644 --- a/typedapi/queryrules/test/test.go +++ b/typedapi/queryrules/test/test.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a query ruleset. +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. 
package test import ( @@ -81,9 +83,11 @@ func NewTestFunc(tp elastictransport.Interface) NewTest { } } -// Creates or updates a query ruleset. +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/test-query-ruleset.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test func New(tp elastictransport.Interface) *Test { r := &Test{ transport: tp, @@ -91,8 +95,6 @@ func New(tp elastictransport.Interface) *Test { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,10 +361,34 @@ func (r *Test) Pretty(pretty bool) *Test { return r } +// The match criteria to apply to rules in the given query ruleset. +// Match criteria should match the keys defined in the `criteria.metadata` field +// of the rule. // API name: match_criteria func (r *Test) MatchCriteria(matchcriteria map[string]json.RawMessage) *Test { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MatchCriteria = matchcriteria + return r +} + +func (r *Test) AddMatchCriterion(key string, value json.RawMessage) *Test { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.MatchCriteria == nil { + r.req.MatchCriteria = make(map[string]json.RawMessage) + } else { + tmp = r.req.MatchCriteria + } + + tmp[key] = value + r.req.MatchCriteria = tmp return r } diff --git a/typedapi/rollup/deletejob/delete_job.go b/typedapi/rollup/deletejob/delete_job.go index 2fe00713c5..eb9380abc7 100644 --- a/typedapi/rollup/deletejob/delete_job.go +++ b/typedapi/rollup/deletejob/delete_job.go @@ -16,9 +16,39 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes an existing rollup job. +// Delete a rollup job. +// +// A job must be stopped before it can be deleted. +// If you attempt to delete a started job, an error occurs. +// Similarly, if you attempt to delete a nonexistent job, an exception occurs. +// +// IMPORTANT: When you delete a job, you remove only the process that is +// actively monitoring and rolling up data. +// The API does not delete any previously rolled up data. +// This is by design; a user may wish to roll up a static data set. +// Because the data set is static, after it has been fully rolled up there is no +// need to keep the indexing rollup job around (as there will be no new data). +// Thus the job can be deleted, leaving behind the rolled up data for analysis. +// If you wish to also remove the rollup data and the rollup index contains the +// data for only a single job, you can delete the whole rollup index. +// If the rollup index stores data from several jobs, you must issue a +// delete-by-query that targets the rollup job's identifier in the rollup index. +// For example: +// +// ``` +// POST my_rollup_index/_delete_by_query +// +// { +// "query": { +// "term": { +// "_rollup.id": "the_rollup_job_id" +// } +// } +// } +// +// ``` package deletejob import ( @@ -76,9 +106,39 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { } } -// Deletes an existing rollup job. +// Delete a rollup job. +// +// A job must be stopped before it can be deleted. +// If you attempt to delete a started job, an error occurs. +// Similarly, if you attempt to delete a nonexistent job, an exception occurs. 
+// +// IMPORTANT: When you delete a job, you remove only the process that is +// actively monitoring and rolling up data. +// The API does not delete any previously rolled up data. +// This is by design; a user may wish to roll up a static data set. +// Because the data set is static, after it has been fully rolled up there is no +// need to keep the indexing rollup job around (as there will be no new data). +// Thus the job can be deleted, leaving behind the rolled up data for analysis. +// If you wish to also remove the rollup data and the rollup index contains the +// data for only a single job, you can delete the whole rollup index. +// If the rollup index stores data from several jobs, you must issue a +// delete-by-query that targets the rollup job's identifier in the rollup index. +// For example: +// +// ``` +// POST my_rollup_index/_delete_by_query +// +// { +// "query": { +// "term": { +// "_rollup.id": "the_rollup_job_id" +// } +// } +// } +// +// ``` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job func New(tp elastictransport.Interface) *DeleteJob { r := &DeleteJob{ transport: tp, diff --git a/typedapi/rollup/deletejob/response.go b/typedapi/rollup/deletejob/response.go index 702f627e16..1bd35fe45a 100644 --- a/typedapi/rollup/deletejob/response.go +++ b/typedapi/rollup/deletejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` TaskFailures []types.TaskFailure `json:"task_failures,omitempty"` diff --git a/typedapi/rollup/getjobs/get_jobs.go b/typedapi/rollup/getjobs/get_jobs.go index e201ab67d6..376c75f18d 100644 --- a/typedapi/rollup/getjobs/get_jobs.go +++ b/typedapi/rollup/getjobs/get_jobs.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves the configuration, stats, and status of rollup jobs. +// Get rollup job information. +// Get the configuration, stats, and status of rollup jobs. +// +// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +// If a job was created, ran for a while, then was deleted, the API does not +// return any details about it. +// For details about a historical rollup job, the rollup capabilities API may be +// more useful. package getjobs import ( @@ -74,9 +81,16 @@ func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs { } } -// Retrieves the configuration, stats, and status of rollup jobs. +// Get rollup job information. 
+// Get the configuration, stats, and status of rollup jobs. +// +// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +// If a job was created, ran for a while, then was deleted, the API does not +// return any details about it. +// For details about a historical rollup job, the rollup capabilities API may be +// more useful. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs func New(tp elastictransport.Interface) *GetJobs { r := &GetJobs{ transport: tp, diff --git a/typedapi/rollup/getjobs/response.go b/typedapi/rollup/getjobs/response.go index c75ca06626..b15ebe2512 100644 --- a/typedapi/rollup/getjobs/response.go +++ b/typedapi/rollup/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 type Response struct { Jobs []types.RollupJob `json:"jobs"` } diff --git a/typedapi/rollup/getrollupcaps/get_rollup_caps.go b/typedapi/rollup/getrollupcaps/get_rollup_caps.go index 5cd13a4551..3e52db5f4e 100644 --- a/typedapi/rollup/getrollupcaps/get_rollup_caps.go +++ b/typedapi/rollup/getrollupcaps/get_rollup_caps.go @@ -16,10 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the capabilities of any rollup jobs that have been configured for a +// Get the rollup job capabilities. +// Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. +// +// This API is useful because a rollup job is often configured to rollup only a +// subset of fields from the source index. +// Furthermore, only certain aggregations can be configured for various fields, +// leading to a limited subset of functionality depending on that configuration. +// This API enables you to inspect an index and determine: +// +// 1. Does this index have associated rollup data somewhere in the cluster? +// 2. If yes to the first question, what fields were rolled up, what +// aggregations can be performed, and where does the data live? package getrollupcaps import ( @@ -75,10 +86,21 @@ func NewGetRollupCapsFunc(tp elastictransport.Interface) NewGetRollupCaps { } } -// Returns the capabilities of any rollup jobs that have been configured for a +// Get the rollup job capabilities. +// Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html +// This API is useful because a rollup job is often configured to rollup only a +// subset of fields from the source index. +// Furthermore, only certain aggregations can be configured for various fields, +// leading to a limited subset of functionality depending on that configuration. +// This API enables you to inspect an index and determine: +// +// 1. Does this index have associated rollup data somewhere in the cluster? +// 2. 
If yes to the first question, what fields were rolled up, what +// aggregations can be performed, and where does the data live? +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps func New(tp elastictransport.Interface) *GetRollupCaps { r := &GetRollupCaps{ transport: tp, diff --git a/typedapi/rollup/getrollupcaps/response.go b/typedapi/rollup/getrollupcaps/response.go index 1491750566..756d5d1c7b 100644 --- a/typedapi/rollup/getrollupcaps/response.go +++ b/typedapi/rollup/getrollupcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrollupcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupcaps // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27 type Response map[string]types.RollupCapabilities diff --git a/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go b/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go index 1dea234f8a..5e9f8a2ad2 100644 --- a/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go +++ b/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the rollup capabilities of all jobs inside of a rollup index (for -// example, the index where rollup data is stored). +// Get the rollup index capabilities. +// Get the rollup capabilities of all jobs inside of a rollup index. +// A single rollup index may store the data for multiple rollup jobs and may +// have a variety of capabilities depending on those jobs. This API enables you +// to determine: +// +// * What jobs are stored in an index (or indices specified via a pattern)? +// * What target indices were rolled up, what fields were used in those rollups, +// and what aggregations can be performed on each job? package getrollupindexcaps import ( @@ -77,10 +84,17 @@ func NewGetRollupIndexCapsFunc(tp elastictransport.Interface) NewGetRollupIndexC } } -// Returns the rollup capabilities of all jobs inside of a rollup index (for -// example, the index where rollup data is stored). +// Get the rollup index capabilities. +// Get the rollup capabilities of all jobs inside of a rollup index. +// A single rollup index may store the data for multiple rollup jobs and may +// have a variety of capabilities depending on those jobs. This API enables you +// to determine: +// +// * What jobs are stored in an index (or indices specified via a pattern)? +// * What target indices were rolled up, what fields were used in those rollups, +// and what aggregations can be performed on each job? 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps func New(tp elastictransport.Interface) *GetRollupIndexCaps { r := &GetRollupIndexCaps{ transport: tp, diff --git a/typedapi/rollup/getrollupindexcaps/response.go b/typedapi/rollup/getrollupindexcaps/response.go index 2dd0012ce9..73478da1a7 100644 --- a/typedapi/rollup/getrollupindexcaps/response.go +++ b/typedapi/rollup/getrollupindexcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrollupindexcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupindexcaps // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27 type Response map[string]types.IndexCapabilities diff --git a/typedapi/rollup/putjob/put_job.go b/typedapi/rollup/putjob/put_job.go index b454960fcf..ebbd54dd0c 100644 --- a/typedapi/rollup/putjob/put_job.go +++ b/typedapi/rollup/putjob/put_job.go @@ -16,9 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates a rollup job. 
+// Create a rollup job. +// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. package putjob import ( @@ -81,9 +97,25 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { } } -// Creates a rollup job. +// Create a rollup job. +// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job func New(tp elastictransport.Interface) *PutJob { r := &PutJob{ transport: tp, @@ -91,8 +123,6 @@ func New(tp elastictransport.Interface) *PutJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,7 +397,7 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { return r } -// Cron A cron string which defines the intervals when the rollup job should be +// A cron string which defines the intervals when the rollup job should be // executed. When the interval // triggers, the indexer attempts to rollup the data in the index pattern. The // cron pattern is unrelated @@ -378,13 +408,17 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { // cron pattern is defined just like a Watcher cron schedule. // API name: cron func (r *PutJob) Cron(cron string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cron = cron return r } -// Groups Defines the grouping fields and aggregations that are defined for this rollup +// Defines the grouping fields and aggregations that are defined for this rollup // job. These fields will then be // available later for aggregating into buckets. These aggs and fields can be // used in any combination. Think of @@ -395,45 +429,65 @@ func (r *PutJob) Cron(cron string) *PutJob { // enough flexibility that you simply need to determine which fields are needed, // not in what order they are needed. 
// API name: groups -func (r *PutJob) Groups(groups *types.Groupings) *PutJob { +func (r *PutJob) Groups(groups types.GroupingsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Groups = *groups + r.req.Groups = *groups.GroupingsCaster() return r } // API name: headers -func (r *PutJob) Headers(httpheaders types.HttpHeaders) *PutJob { - r.req.Headers = httpheaders +func (r *PutJob) Headers(httpheaders types.HttpHeadersVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// IndexPattern The index or index pattern to roll up. Supports wildcard-style patterns +// The index or index pattern to roll up. Supports wildcard-style patterns // (`logstash-*`). The job attempts to // rollup the entire index or index-pattern. // API name: index_pattern func (r *PutJob) IndexPattern(indexpattern string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexPattern = indexpattern return r } -// Metrics Defines the metrics to collect for each grouping tuple. By default, only the +// Defines the metrics to collect for each grouping tuple. By default, only the // doc_counts are collected for each // group. To make rollup useful, you will often add metrics like averages, mins, // maxes, etc. Metrics are defined // on a per-field basis and for each field you configure which metric should be // collected. 
// API name: metrics -func (r *PutJob) Metrics(metrics ...types.FieldMetric) *PutJob { - r.req.Metrics = metrics +func (r *PutJob) Metrics(metrics ...types.FieldMetricVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range metrics { + r.req.Metrics = append(r.req.Metrics, *v.FieldMetricCaster()) + + } return r } -// PageSize The number of bucket results that are processed on each iteration of the +// The number of bucket results that are processed on each iteration of the // rollup indexer. A larger value tends // to execute faster, but requires more memory during processing. This value has // no effect on how the data is @@ -441,25 +495,40 @@ func (r *PutJob) Metrics(metrics ...types.FieldMetric) *PutJob { // indexer. // API name: page_size func (r *PutJob) PageSize(pagesize int) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PageSize = pagesize return r } -// RollupIndex The index that contains the rollup results. The index can be shared with +// The index that contains the rollup results. The index can be shared with // other rollup jobs. The data is stored so that it doesn’t interfere with // unrelated jobs. // API name: rollup_index func (r *PutJob) RollupIndex(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RollupIndex = indexname return r } -// Timeout Time to wait for the request to complete. +// Time to wait for the request to complete. 
// API name: timeout -func (r *PutJob) Timeout(duration types.Duration) *PutJob { - r.req.Timeout = duration +func (r *PutJob) Timeout(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/rollup/putjob/request.go b/typedapi/rollup/putjob/request.go index dd3c7ba9b8..2d9814f297 100644 --- a/typedapi/rollup/putjob/request.go +++ b/typedapi/rollup/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L89 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L105 type Request struct { // Cron A cron string which defines the intervals when the rollup job should be diff --git a/typedapi/rollup/putjob/response.go b/typedapi/rollup/putjob/response.go index db45531b64..a3ab012388 100644 --- a/typedapi/rollup/putjob/response.go +++ b/typedapi/rollup/putjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putjob // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/rollup/rollupsearch/request.go b/typedapi/rollup/rollupsearch/request.go index 4dc98e8eff..5062f1722d 100644 --- a/typedapi/rollup/rollupsearch/request.go +++ b/typedapi/rollup/rollupsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rollupsearch @@ -33,12 +33,12 @@ import ( // Request holds the request body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L109 type Request struct { // Aggregations Specifies aggregations. Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` - // Query Specifies a DSL query. + // Query Specifies a DSL query that is subject to some limitations. 
Query *types.Query `json:"query,omitempty"` // Size Must be zero if set, as rollups work on pre-aggregated data. Size *int `json:"size,omitempty"` diff --git a/typedapi/rollup/rollupsearch/response.go b/typedapi/rollup/rollupsearch/response.go index 8a894c9f9e..8ba706cbe0 100644 --- a/typedapi/rollup/rollupsearch/response.go +++ b/typedapi/rollup/rollupsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package rollupsearch @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Hits types.HitsMetadata `json:"hits"` diff --git a/typedapi/rollup/rollupsearch/rollup_search.go b/typedapi/rollup/rollupsearch/rollup_search.go index e7bf0ffa6e..e188b9bda6 100644 --- a/typedapi/rollup/rollupsearch/rollup_search.go +++ b/typedapi/rollup/rollupsearch/rollup_search.go @@ -16,9 +16,54 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Enables searching rolled-up data using the standard Query DSL. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Search rolled-up data. 
+// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// **Searching both historical rollup and non-rollup data** +// +// The rollup search API has the capability to search across both "live" +// non-rollup data and the aggregated rollup data. +// This is done by simply adding the live indices to the URI. For example: +// +// ``` +// GET sensor-1,sensor_rollup/_rollup_search +// +// { +// "size": 0, +// "aggregations": { +// "max_temperature": { +// "max": { +// "field": "temperature" +// } +// } +// } +// } +// +// ``` +// +// The rollup search endpoint does two things when the search runs: +// +// * The original request is sent to the non-rollup index unaltered. +// * A rewritten version of the original request is sent to the rollup index. +// +// When the two responses are received, the endpoint rewrites the rollup +// response and merges the two together. +// During the merging process, if there is any overlap in buckets between the +// two responses, the buckets from the non-rollup index are used. package rollupsearch import ( @@ -81,9 +126,54 @@ func NewRollupSearchFunc(tp elastictransport.Interface) NewRollupSearch { } } -// Enables searching rolled-up data using the standard Query DSL. +// Search rolled-up data. 
+// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// **Searching both historical rollup and non-rollup data** +// +// The rollup search API has the capability to search across both "live" +// non-rollup data and the aggregated rollup data. +// This is done by simply adding the live indices to the URI. For example: +// +// ``` +// GET sensor-1,sensor_rollup/_rollup_search +// +// { +// "size": 0, +// "aggregations": { +// "max_temperature": { +// "max": { +// "field": "temperature" +// } +// } +// } +// } +// +// ``` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html +// The rollup search endpoint does two things when the search runs: +// +// * The original request is sent to the non-rollup index unaltered. +// * A rewritten version of the original request is sent to the rollup index. +// +// When the two responses are received, the endpoint rewrites the rollup +// response and merges the two together. +// During the merging process, if there is any overlap in buckets between the +// two responses, the buckets from the non-rollup index are used. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search func New(tp elastictransport.Interface) *RollupSearch { r := &RollupSearch{ transport: tp, @@ -91,8 +181,6 @@ func New(tp elastictransport.Interface) *RollupSearch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +394,19 @@ func (r *RollupSearch) Header(key, value string) *RollupSearch { return r } -// Index Enables searching rolled-up data using the standard Query DSL. +// Index A comma-separated list of data streams and indices used to limit the request. +// This parameter has the following rules: +// +// * At least one data stream, index, or wildcard expression must be specified. +// This target can include a rollup or non-rollup index. For data streams, the +// stream's backing indices can only serve as non-rollup indices. Omitting the +// parameter or using `_all` are not permitted. +// * Multiple non-rollup indices may be specified. +// * Only one rollup index may be specified. If more than one are supplied, an +// exception occurs. +// * Wildcard expressions (`*`) may be used. If they match more than one rollup +// index, an exception occurs. However, you can use an expression to match +// multiple non-rollup indices or data streams. // API Name: index func (r *RollupSearch) _index(index string) *RollupSearch { r.paramSet |= indexMask @@ -377,27 +477,57 @@ func (r *RollupSearch) Pretty(pretty bool) *RollupSearch { return r } -// Aggregations Specifies aggregations. +// Specifies aggregations. 
// API name: aggregations func (r *RollupSearch) Aggregations(aggregations map[string]types.Aggregations) *RollupSearch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} +func (r *RollupSearch) AddAggregation(key string, value types.AggregationsVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + tmp = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// Query Specifies a DSL query. +// Specifies a DSL query that is subject to some limitations. // API name: query -func (r *RollupSearch) Query(query *types.Query) *RollupSearch { +func (r *RollupSearch) Query(query types.QueryVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Size Must be zero if set, as rollups work on pre-aggregated data. +// Must be zero if set, as rollups work on pre-aggregated data. // API name: size func (r *RollupSearch) Size(size int) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r diff --git a/typedapi/rollup/startjob/response.go b/typedapi/rollup/startjob/response.go index cb8a99ded1..750e738c24 100644 --- a/typedapi/rollup/startjob/response.go +++ b/typedapi/rollup/startjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package startjob // Response holds the response body struct for the package startjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 type Response struct { Started bool `json:"started"` } diff --git a/typedapi/rollup/startjob/start_job.go b/typedapi/rollup/startjob/start_job.go index fd10d38c9b..95cad6fc07 100644 --- a/typedapi/rollup/startjob/start_job.go +++ b/typedapi/rollup/startjob/start_job.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Starts an existing, stopped rollup job. +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. package startjob import ( @@ -76,9 +78,11 @@ func NewStartJobFunc(tp elastictransport.Interface) NewStartJob { } } -// Starts an existing, stopped rollup job. +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job func New(tp elastictransport.Interface) *StartJob { r := &StartJob{ transport: tp, diff --git a/typedapi/rollup/stopjob/response.go b/typedapi/rollup/stopjob/response.go index 73e138f9ce..20dd92e4df 100644 --- a/typedapi/rollup/stopjob/response.go +++ b/typedapi/rollup/stopjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stopjob // Response holds the response body struct for the package stopjob // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/rollup/stopjob/stop_job.go b/typedapi/rollup/stopjob/stop_job.go index 14c5a09359..0840024aa3 100644 --- a/typedapi/rollup/stopjob/stop_job.go +++ b/typedapi/rollup/stopjob/stop_job.go @@ -16,9 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Stops an existing, started rollup job. +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. 
+// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. package stopjob import ( @@ -76,9 +91,24 @@ func NewStopJobFunc(tp elastictransport.Interface) NewStopJob { } } -// Stops an existing, started rollup job. +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job func New(tp elastictransport.Interface) *StopJob { r := &StopJob{ transport: tp, @@ -304,6 +334,10 @@ func (r *StopJob) _id(id string) *StopJob { // Timeout If `wait_for_completion` is `true`, the API blocks for (at maximum) the // specified duration while waiting for the job to stop. 
// If more than `timeout` time has passed, the API throws a timeout exception. +// NOTE: Even if a timeout occurs, the stop request is still processing and +// eventually moves the job to STOPPED. +// The timeout simply means the API call itself timed out while waiting for the +// status change. // API name: timeout func (r *StopJob) Timeout(duration string) *StopJob { r.values.Set("timeout", duration) diff --git a/typedapi/searchablesnapshots/cachestats/cache_stats.go b/typedapi/searchablesnapshots/cachestats/cache_stats.go index 56c2b968b8..c3d343c29d 100644 --- a/typedapi/searchablesnapshots/cachestats/cache_stats.go +++ b/typedapi/searchablesnapshots/cachestats/cache_stats.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieve node-level cache statistics about searchable snapshots. +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. package cachestats import ( @@ -74,9 +75,10 @@ func NewCacheStatsFunc(tp elastictransport.Interface) NewCacheStats { } } -// Retrieve node-level cache statistics about searchable snapshots. +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats func New(tp elastictransport.Interface) *CacheStats { r := &CacheStats{ transport: tp, @@ -299,9 +301,7 @@ func (r *CacheStats) Header(key, value string) *CacheStats { return r } -// NodeId A comma-separated list of node IDs or names to limit the returned -// information; use `_local` to return information from the node you're -// connecting to, leave empty to get information from all nodes +// NodeId The names of the nodes in the cluster to target. // API Name: nodeid func (r *CacheStats) NodeId(nodeid string) *CacheStats { r.paramSet |= nodeidMask diff --git a/typedapi/searchablesnapshots/cachestats/response.go b/typedapi/searchablesnapshots/cachestats/response.go index 39504f117a..632b95bd22 100644 --- a/typedapi/searchablesnapshots/cachestats/response.go +++ b/typedapi/searchablesnapshots/cachestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package cachestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cachestats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 type Response struct { Nodes map[string]types.Node `json:"nodes"` } diff --git a/typedapi/searchablesnapshots/clearcache/clear_cache.go b/typedapi/searchablesnapshots/clearcache/clear_cache.go index ae89233f07..e050c5785e 100644 --- a/typedapi/searchablesnapshots/clearcache/clear_cache.go +++ b/typedapi/searchablesnapshots/clearcache/clear_cache.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clear the cache of searchable snapshots. +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. package clearcache import ( @@ -75,9 +77,11 @@ func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { } } -// Clear the cache of searchable snapshots. +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache func New(tp elastictransport.Interface) *ClearCache { r := &ClearCache{ transport: tp, @@ -300,7 +304,9 @@ func (r *ClearCache) Header(key, value string) *ClearCache { return r } -// Index A comma-separated list of index names +// Index A comma-separated list of data streams, indices, and aliases to clear from +// the cache. +// It supports wildcards (`*`). // API Name: index func (r *ClearCache) Index(index string) *ClearCache { r.paramSet |= indexMask @@ -340,20 +346,6 @@ func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { return r } -// API name: pretty -func (r *ClearCache) Pretty(pretty bool) *ClearCache { - r.values.Set("pretty", strconv.FormatBool(pretty)) - - return r -} - -// API name: human -func (r *ClearCache) Human(human bool) *ClearCache { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -375,3 +367,25 @@ func (r *ClearCache) FilterPath(filterpaths ...string) *ClearCache { return r } + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCache) Human(human bool) *ClearCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging. 
+// API name: pretty +func (r *ClearCache) Pretty(pretty bool) *ClearCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/searchablesnapshots/clearcache/response.go b/typedapi/searchablesnapshots/clearcache/response.go index c441fdc3dc..3913d50a48 100644 --- a/typedapi/searchablesnapshots/clearcache/response.go +++ b/typedapi/searchablesnapshots/clearcache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/searchablesnapshots/mount/mount.go b/typedapi/searchablesnapshots/mount/mount.go index 3ed9fed56c..680f2178f3 100644 --- a/typedapi/searchablesnapshots/mount/mount.go +++ b/typedapi/searchablesnapshots/mount/mount.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Mount a snapshot as a searchable index. +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. 
+// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. package mount import ( @@ -86,9 +90,13 @@ func NewMountFunc(tp elastictransport.Interface) NewMount { } } -// Mount a snapshot as a searchable index. +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. +// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount func New(tp elastictransport.Interface) *Mount { r := &Mount{ transport: tp, @@ -96,8 +104,6 @@ func New(tp elastictransport.Interface) *Mount { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,7 +323,7 @@ func (r *Mount) Header(key, value string) *Mount { return r } -// Repository The name of the repository containing the snapshot of the index to mount +// Repository The name of the repository containing the snapshot of the index to mount. // API Name: repository func (r *Mount) _repository(repository string) *Mount { r.paramSet |= repositoryMask @@ -326,7 +332,7 @@ func (r *Mount) _repository(repository string) *Mount { return r } -// Snapshot The name of the snapshot of the index to mount +// Snapshot The name of the snapshot of the index to mount. // API Name: snapshot func (r *Mount) _snapshot(snapshot string) *Mount { r.paramSet |= snapshotMask @@ -335,7 +341,10 @@ func (r *Mount) _snapshot(snapshot string) *Mount { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Mount) MasterTimeout(duration string) *Mount { r.values.Set("master_timeout", duration) @@ -343,7 +352,7 @@ func (r *Mount) MasterTimeout(duration string) *Mount { return r } -// WaitForCompletion Should this request wait until the operation has completed before returning +// WaitForCompletion If true, the request blocks until the operation is complete. // API name: wait_for_completion func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -351,8 +360,7 @@ func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { return r } -// Storage Selects the kind of local storage used to accelerate searches. Experimental, -// and defaults to `full_copy` +// Storage The mount option for the searchable snapshot index. // API name: storage func (r *Mount) Storage(storage string) *Mount { r.values.Set("storage", storage) @@ -404,30 +412,75 @@ func (r *Mount) Pretty(pretty bool) *Mount { return r } +// The names of settings that should be removed from the index when it is +// mounted. // API name: ignore_index_settings func (r *Mount) IgnoreIndexSettings(ignoreindexsettings ...string) *Mount { - r.req.IgnoreIndexSettings = ignoreindexsettings + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + } return r } +// The name of the index contained in the snapshot whose data is to be mounted. +// If no `renamed_index` is specified, this name will also be used to create the +// new index. 
// API name: index func (r *Mount) Index(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = indexname return r } +// The settings that should be added to the index when it is mounted. // API name: index_settings func (r *Mount) IndexSettings(indexsettings map[string]json.RawMessage) *Mount { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexSettings = indexsettings + return r +} + +func (r *Mount) AddIndexSetting(key string, value json.RawMessage) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + + tmp[key] = value + + r.req.IndexSettings = tmp return r } +// The name of the index that will be created. // API name: renamed_index func (r *Mount) RenamedIndex(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RenamedIndex = &indexname return r diff --git a/typedapi/searchablesnapshots/mount/request.go b/typedapi/searchablesnapshots/mount/request.go index eab51030da..47f9b48ba2 100644 --- a/typedapi/searchablesnapshots/mount/request.go +++ b/typedapi/searchablesnapshots/mount/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mount @@ -30,12 +30,20 @@ import ( // Request holds the request body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L92 type Request struct { - IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` - Index string `json:"index"` - IndexSettings map[string]json.RawMessage `json:"index_settings,omitempty"` - RenamedIndex *string `json:"renamed_index,omitempty"` + + // IgnoreIndexSettings The names of settings that should be removed from the index when it is + // mounted. + IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` + // Index The name of the index contained in the snapshot whose data is to be mounted. + // If no `renamed_index` is specified, this name will also be used to create the + // new index. + Index string `json:"index"` + // IndexSettings The settings that should be added to the index when it is mounted. + IndexSettings map[string]json.RawMessage `json:"index_settings,omitempty"` + // RenamedIndex The name of the index that will be created. + RenamedIndex *string `json:"renamed_index,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/searchablesnapshots/mount/response.go b/typedapi/searchablesnapshots/mount/response.go index 9851774fbc..4f153f81c2 100644 --- a/typedapi/searchablesnapshots/mount/response.go +++ b/typedapi/searchablesnapshots/mount/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package mount @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 type Response struct { Snapshot types.MountedSnapshot `json:"snapshot"` } diff --git a/typedapi/searchablesnapshots/stats/response.go b/typedapi/searchablesnapshots/stats/response.go index 8d2cbee383..79bc8ae7fd 100644 --- a/typedapi/searchablesnapshots/stats/response.go +++ b/typedapi/searchablesnapshots/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 type Response struct { Stats json.RawMessage `json:"stats,omitempty"` Total json.RawMessage `json:"total,omitempty"` diff --git a/typedapi/searchablesnapshots/stats/stats.go b/typedapi/searchablesnapshots/stats/stats.go index 878cca9e1a..e853c4c9e0 100644 --- a/typedapi/searchablesnapshots/stats/stats.go +++ b/typedapi/searchablesnapshots/stats/stats.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieve shard-level statistics about searchable snapshots. +// Get searchable snapshot statistics. package stats import ( @@ -75,9 +75,9 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Retrieve shard-level statistics about searchable snapshots. +// Get searchable snapshot statistics. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, @@ -296,7 +296,8 @@ func (r *Stats) Header(key, value string) *Stats { return r } -// Index A comma-separated list of index names +// Index A comma-separated list of data streams and indices to retrieve statistics +// for. // API Name: index func (r *Stats) Index(index string) *Stats { r.paramSet |= indexMask diff --git a/typedapi/searchapplication/delete/delete.go b/typedapi/searchapplication/delete/delete.go index 8575b175d0..d8af05177a 100644 --- a/typedapi/searchapplication/delete/delete.go +++ b/typedapi/searchapplication/delete/delete.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a search application. +// // Remove a search application and its associated alias. Indices attached to the // search application are not removed. package delete @@ -79,10 +80,11 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } // Delete a search application. +// // Remove a search application and its associated alias. Indices attached to the // search application are not removed. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-search-application.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -294,7 +296,7 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Name The name of the search application to delete +// Name The name of the search application to delete. // API Name: name func (r *Delete) _name(name string) *Delete { r.paramSet |= nameMask diff --git a/typedapi/searchapplication/delete/response.go b/typedapi/searchapplication/delete/response.go index 8935e1f062..e078429cc9 100644 --- a/typedapi/searchapplication/delete/response.go +++ b/typedapi/searchapplication/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go b/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go index 0121c31e04..fab6d461d6 100644 --- a/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go +++ b/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a behavioral analytics collection. // The associated data stream is also deleted. @@ -80,7 +80,7 @@ func NewDeleteBehavioralAnalyticsFunc(tp elastictransport.Interface) NewDeleteBe // Delete a behavioral analytics collection. // The associated data stream is also deleted. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-analytics-collection.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics func New(tp elastictransport.Interface) *DeleteBehavioralAnalytics { r := &DeleteBehavioralAnalytics{ transport: tp, diff --git a/typedapi/searchapplication/deletebehavioralanalytics/response.go b/typedapi/searchapplication/deletebehavioralanalytics/response.go index b4c9a14d26..0c9f467c8d 100644 --- a/typedapi/searchapplication/deletebehavioralanalytics/response.go +++ b/typedapi/searchapplication/deletebehavioralanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletebehavioralanalytics // Response holds the response body struct for the package deletebehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/searchapplication/get/get.go b/typedapi/searchapplication/get/get.go index 6787ce6e1b..cb5aec50ab 100644 --- a/typedapi/searchapplication/get/get.go +++ b/typedapi/searchapplication/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get search application details. package get @@ -78,7 +78,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { // Get search application details. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-search-application.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/searchapplication/get/response.go b/typedapi/searchapplication/get/response.go index caa712b82f..8bd4e669d0 100644 --- a/typedapi/searchapplication/get/response.go +++ b/typedapi/searchapplication/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L24 type Response struct { // AnalyticsCollectionName Analytics collection associated to the Search Application. diff --git a/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go b/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go index 108f1bebc9..535fa8fc1b 100644 --- a/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go +++ b/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get behavioral analytics collections. package getbehavioralanalytics @@ -76,7 +76,7 @@ func NewGetBehavioralAnalyticsFunc(tp elastictransport.Interface) NewGetBehavior // Get behavioral analytics collections. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics func New(tp elastictransport.Interface) *GetBehavioralAnalytics { r := &GetBehavioralAnalytics{ transport: tp, diff --git a/typedapi/searchapplication/getbehavioralanalytics/response.go b/typedapi/searchapplication/getbehavioralanalytics/response.go index 239a89879b..59f5568d50 100644 --- a/typedapi/searchapplication/getbehavioralanalytics/response.go +++ b/typedapi/searchapplication/getbehavioralanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getbehavioralanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getbehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27 type Response map[string]types.AnalyticsCollection diff --git a/typedapi/searchapplication/list/list.go b/typedapi/searchapplication/list/list.go index b2223ff073..dff210d99f 100644 --- a/typedapi/searchapplication/list/list.go +++ b/typedapi/searchapplication/list/list.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the existing search applications. +// Get search applications. +// Get information about search applications. package list import ( @@ -68,9 +69,10 @@ func NewListFunc(tp elastictransport.Interface) NewList { } } -// Returns the existing search applications. +// Get search applications. +// Get information about search applications. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list func New(tp elastictransport.Interface) *List { r := &List{ transport: tp, diff --git a/typedapi/searchapplication/list/response.go b/typedapi/searchapplication/list/response.go index d0b909a10a..e6955f80ce 100644 --- a/typedapi/searchapplication/list/response.go +++ b/typedapi/searchapplication/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package list @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/list/SearchApplicationsListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/list/SearchApplicationsListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.SearchApplication `json:"results"` diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go b/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go new file mode 100644 index 0000000000..78e22a406f --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create a behavioral analytics collection event. +package postbehavioralanalyticsevent + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + collectionnameMask = iota + 1 + + eventtypeMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostBehavioralAnalyticsEvent struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req any + deferred []func(request any) error + buf *gobytes.Buffer + + paramSet int + + collectionname string + eventtype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostBehavioralAnalyticsEvent type alias for index. 
+type NewPostBehavioralAnalyticsEvent func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent + +// NewPostBehavioralAnalyticsEventFunc returns a new instance of PostBehavioralAnalyticsEvent with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostBehavioralAnalyticsEventFunc(tp elastictransport.Interface) NewPostBehavioralAnalyticsEvent { + return func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent { + n := New(tp) + + n._collectionname(collectionname) + + n._eventtype(eventtype) + + return n + } +} + +// Create a behavioral analytics collection event. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event +func New(tp elastictransport.Interface) *PostBehavioralAnalyticsEvent { + r := &PostBehavioralAnalyticsEvent{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostBehavioralAnalyticsEvent) Raw(raw io.Reader) *PostBehavioralAnalyticsEvent { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Request(req any) *PostBehavioralAnalyticsEvent { + r.req = req + + return r +} + +// Payload allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Payload(payload any) *PostBehavioralAnalyticsEvent { + r.req = payload + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PostBehavioralAnalyticsEvent) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostBehavioralAnalyticsEvent: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == collectionnameMask|eventtypeMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "collectionname", r.collectionname) + } + path.WriteString(r.collectionname) + path.WriteString("/") + path.WriteString("event") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "eventtype", r.eventtype) + } + path.WriteString(r.eventtype) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not 
build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PostBehavioralAnalyticsEvent) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.post_behavioral_analytics_event") + if reader := instrument.RecordRequestBody(ctx, "search_application.post_behavioral_analytics_event", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.post_behavioral_analytics_event") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostBehavioralAnalyticsEvent query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postbehavioralanalyticsevent.Response +func (r PostBehavioralAnalyticsEvent) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, 
"search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + if res.StatusCode == 404 { + data, err := io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PostBehavioralAnalyticsEvent headers map. +func (r *PostBehavioralAnalyticsEvent) Header(key, value string) *PostBehavioralAnalyticsEvent { + r.headers.Set(key, value) + + return r +} + +// CollectionName The name of the behavioral analytics collection. +// API Name: collectionname +func (r *PostBehavioralAnalyticsEvent) _collectionname(collectionname string) *PostBehavioralAnalyticsEvent { + r.paramSet |= collectionnameMask + r.collectionname = collectionname + + return r +} + +// EventType The analytics event type. +// API Name: eventtype +func (r *PostBehavioralAnalyticsEvent) _eventtype(eventtype string) *PostBehavioralAnalyticsEvent { + r.paramSet |= eventtypeMask + r.eventtype = eventtype + + return r +} + +// Debug Whether the response type has to include more details +// API name: debug +func (r *PostBehavioralAnalyticsEvent) Debug(debug bool) *PostBehavioralAnalyticsEvent { + r.values.Set("debug", strconv.FormatBool(debug)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostBehavioralAnalyticsEvent) ErrorTrace(errortrace bool) *PostBehavioralAnalyticsEvent { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostBehavioralAnalyticsEvent) FilterPath(filterpaths ...string) *PostBehavioralAnalyticsEvent { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostBehavioralAnalyticsEvent) Human(human bool) *PostBehavioralAnalyticsEvent { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostBehavioralAnalyticsEvent) Pretty(pretty bool) *PostBehavioralAnalyticsEvent { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/request.go b/typedapi/searchapplication/postbehavioralanalyticsevent/request.go new file mode 100644 index 0000000000..3dab68f60d --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostRequest.ts#L24-L58 +type Request = json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := new(json.RawMessage) + + return r +} diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/response.go b/typedapi/searchapplication/postbehavioralanalyticsevent/response.go new file mode 100644 index 0000000000..063409d059 --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostResponse.ts#L22-L47 +type Response struct { + Accepted bool `json:"accepted"` + Event json.RawMessage `json:"event,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/searchapplication/put/put.go b/typedapi/searchapplication/put/put.go index 5f795aeddf..1287506d44 100644 --- a/typedapi/searchapplication/put/put.go +++ b/typedapi/searchapplication/put/put.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update a search application. package put @@ -83,7 +83,7 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { // Create or update a search application. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put func New(tp elastictransport.Interface) *Put { r := &Put{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -368,27 +366,43 @@ func (r *Put) Pretty(pretty bool) *Put { return r } -// AnalyticsCollectionName Analytics collection associated to the Search Application. +// Analytics collection associated to the Search Application. // API name: analytics_collection_name func (r *Put) AnalyticsCollectionName(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AnalyticsCollectionName = &name return r } -// Indices Indices that are part of the Search Application. +// Indices that are part of the Search Application. // API name: indices func (r *Put) Indices(indices ...string) *Put { - r.req.Indices = indices + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + r.req.Indices = append(r.req.Indices, v) + + } return r } -// Template Search template to use on search operations. +// Search template to use on search operations. 
// API name: template -func (r *Put) Template(template *types.SearchApplicationTemplate) *Put { +func (r *Put) Template(template types.SearchApplicationTemplateVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = template + r.req.Template = template.SearchApplicationTemplateCaster() return r } diff --git a/typedapi/searchapplication/put/request.go b/typedapi/searchapplication/put/request.go index 934b1ef56f..c4661fc103 100644 --- a/typedapi/searchapplication/put/request.go +++ b/typedapi/searchapplication/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L57 type Request = types.SearchApplicationParameters // NewRequest returns a Request diff --git a/typedapi/searchapplication/put/response.go b/typedapi/searchapplication/put/response.go index f34fd06ec9..8c276bbdd0 100644 --- a/typedapi/searchapplication/put/response.go +++ b/typedapi/searchapplication/put/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package put @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go b/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go index 6c8274a8b5..086363b921 100644 --- a/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go +++ b/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a behavioral analytics collection. package putbehavioralanalytics @@ -78,7 +78,7 @@ func NewPutBehavioralAnalyticsFunc(tp elastictransport.Interface) NewPutBehavior // Create a behavioral analytics collection. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-analytics-collection.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics func New(tp elastictransport.Interface) *PutBehavioralAnalytics { r := &PutBehavioralAnalytics{ transport: tp, diff --git a/typedapi/searchapplication/putbehavioralanalytics/response.go b/typedapi/searchapplication/putbehavioralanalytics/response.go index 1ebd049452..fff86649c8 100644 --- a/typedapi/searchapplication/putbehavioralanalytics/response.go +++ b/typedapi/searchapplication/putbehavioralanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putbehavioralanalytics // Response holds the response body struct for the package putbehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L23-L25 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/searchapplication/renderquery/render_query.go b/typedapi/searchapplication/renderquery/render_query.go new file mode 100644 index 0000000000..76ed6cca3d --- /dev/null +++ b/typedapi/searchapplication/renderquery/render_query.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Render a search application query. +// Generate an Elasticsearch query using the specified query parameters and the +// search template associated with the search application or a default template +// if none is specified. +// If a parameter used in the search template is not specified in `params`, the +// parameter's default value will be used. +// The API returns the specific Elasticsearch query that would be generated and +// run by calling the search application search API. +// +// You must have `read` privileges on the backing alias of the search +// application. +package renderquery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RenderQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRenderQuery type alias for index. +type NewRenderQuery func(name string) *RenderQuery + +// NewRenderQueryFunc returns a new instance of RenderQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRenderQueryFunc(tp elastictransport.Interface) NewRenderQuery { + return func(name string) *RenderQuery { + n := New(tp) + + n._name(name) + + return n + } +} + +// Render a search application query. +// Generate an Elasticsearch query using the specified query parameters and the +// search template associated with the search application or a default template +// if none is specified. +// If a parameter used in the search template is not specified in `params`, the +// parameter's default value will be used. +// The API returns the specific Elasticsearch query that would be generated and +// run by calling the search application search API. +// +// You must have `read` privileges on the backing alias of the search +// application. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query +func New(tp elastictransport.Interface) *RenderQuery { + r := &RenderQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RenderQuery) Raw(raw io.Reader) *RenderQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RenderQuery) Request(req *Request) *RenderQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RenderQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RenderQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_render_query") 
+ + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RenderQuery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.render_query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.render_query") + if reader := instrument.RecordRequestBody(ctx, "search_application.render_query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.render_query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the 
RenderQuery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a renderquery.Response +func (r RenderQuery) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.render_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the RenderQuery headers map. +func (r *RenderQuery) Header(key, value string) *RenderQuery { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to render teh query for. 
+// API Name: name +func (r *RenderQuery) _name(name string) *RenderQuery { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RenderQuery) ErrorTrace(errortrace bool) *RenderQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RenderQuery) FilterPath(filterpaths ...string) *RenderQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RenderQuery) Human(human bool) *RenderQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *RenderQuery) Pretty(pretty bool) *RenderQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: params +func (r *RenderQuery) Params(params map[string]json.RawMessage) *RenderQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Params = params + return r +} + +func (r *RenderQuery) AddParam(key string, value json.RawMessage) *RenderQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp + return r +} diff --git a/typedapi/searchapplication/renderquery/request.go b/typedapi/searchapplication/renderquery/request.go new file mode 100644 index 0000000000..b94ef4a26c --- /dev/null +++ b/typedapi/searchapplication/renderquery/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package renderquery + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/render_query/SearchApplicationsRenderQueryRequest.ts#L24-L54 +type Request struct { + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Renderquery request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/searchapplication/renderquery/response.go b/typedapi/searchapplication/renderquery/response.go new file mode 100644 index 0000000000..226ca696b5 --- /dev/null +++ b/typedapi/searchapplication/renderquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package renderquery + +// Response holds the response body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/render_query/SearchApplicationsRenderQueryResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/searchapplication/search/request.go b/typedapi/searchapplication/search/request.go index 8f17302bca..0dc37e1850 100644 --- a/typedapi/searchapplication/search/request.go +++ b/typedapi/searchapplication/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L61 type Request struct { // Params Query parameters specific to this request, which will override any defaults diff --git a/typedapi/searchapplication/search/response.go b/typedapi/searchapplication/search/response.go index ee847f5510..941e776a06 100644 --- a/typedapi/searchapplication/search/response.go +++ b/typedapi/searchapplication/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package search @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/search/SearchApplicationsSearchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/search/SearchApplicationsSearchResponse.ts#L22-L24 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. 
+ // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response diff --git a/typedapi/searchapplication/search/search.go b/typedapi/searchapplication/search/search.go index 75e2e60613..ebc59be289 100644 --- a/typedapi/searchapplication/search/search.go +++ b/typedapi/searchapplication/search/search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Run a search application search. 
// Generate and run an Elasticsearch query that uses the specified query @@ -93,7 +93,7 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { // Unspecified template parameters are assigned their default values if // applicable. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-search.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search func New(tp elastictransport.Interface) *Search { r := &Search{ transport: tp, @@ -101,8 +101,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -382,12 +380,33 @@ func (r *Search) Pretty(pretty bool) *Search { return r } -// Params Query parameters specific to this request, which will override any defaults +// Query parameters specific to this request, which will override any defaults // specified in the template. 
// API name: params func (r *Search) Params(params map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} + +func (r *Search) AddParam(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + r.req.Params = tmp return r } diff --git a/typedapi/security/activateuserprofile/activate_user_profile.go b/typedapi/security/activateuserprofile/activate_user_profile.go index f959d0ee38..d805f9281b 100644 --- a/typedapi/security/activateuserprofile/activate_user_profile.go +++ b/typedapi/security/activateuserprofile/activate_user_profile.go @@ -16,11 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Activate a user profile. // // Create or update a user profile on behalf of another user. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
+// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. +// +// When updating a profile document, the API enables the document if it was +// disabled. +// Any updates do not change existing content for either the `labels` or `data` +// fields. package activateuserprofile import ( @@ -80,7 +101,28 @@ func NewActivateUserProfileFunc(tp elastictransport.Interface) NewActivateUserPr // // Create or update a user profile on behalf of another user. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. +// +// When updating a profile document, the API enables the document if it was +// disabled. 
+// Any updates do not change existing content for either the `labels` or `data` +// fields. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile func New(tp elastictransport.Interface) *ActivateUserProfile { r := &ActivateUserProfile{ transport: tp, @@ -88,8 +130,6 @@ func New(tp elastictransport.Interface) *ActivateUserProfile { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -343,31 +383,58 @@ func (r *ActivateUserProfile) Pretty(pretty bool) *ActivateUserProfile { return r } +// The user's Elasticsearch access token or JWT. +// Both `access` and `id` JWT token types are supported and they depend on the +// underlying JWT realm configuration. +// If you specify the `access_token` grant type, this parameter is required. +// It is not valid with other grant types. // API name: access_token func (r *ActivateUserProfile) AccessToken(accesstoken string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AccessToken = &accesstoken return r } +// The type of grant. // API name: grant_type func (r *ActivateUserProfile) GrantType(granttype granttype.GrantType) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = granttype - return r } +// The user's password. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. // API name: password func (r *ActivateUserProfile) Password(password string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Password = &password return r } +// The username that identifies the user. 
+// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. // API name: username func (r *ActivateUserProfile) Username(username string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Username = &username diff --git a/typedapi/security/activateuserprofile/request.go b/typedapi/security/activateuserprofile/request.go index 67527fa2f8..101dc49881 100644 --- a/typedapi/security/activateuserprofile/request.go +++ b/typedapi/security/activateuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package activateuserprofile @@ -29,12 +29,25 @@ import ( // Request holds the request body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/activate_user_profile/Request.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/activate_user_profile/Request.ts#L23-L76 type Request struct { - AccessToken *string `json:"access_token,omitempty"` - GrantType granttype.GrantType `json:"grant_type"` - Password *string `json:"password,omitempty"` - Username *string `json:"username,omitempty"` + + // AccessToken The user's Elasticsearch access token or JWT. + // Both `access` and `id` JWT token types are supported and they depend on the + // underlying JWT realm configuration. + // If you specify the `access_token` grant type, this parameter is required. + // It is not valid with other grant types. 
+ AccessToken *string `json:"access_token,omitempty"` + // GrantType The type of grant. + GrantType granttype.GrantType `json:"grant_type"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Password *string `json:"password,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/activateuserprofile/response.go b/typedapi/security/activateuserprofile/response.go index 7a806b6c79..cc343fc898 100644 --- a/typedapi/security/activateuserprofile/response.go +++ b/typedapi/security/activateuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package activateuserprofile @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/activate_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/activate_user_profile/Response.ts#L22-L24 type Response struct { Data map[string]json.RawMessage `json:"data"` Doc_ types.UserProfileHitMetadata `json:"_doc"` diff --git a/typedapi/security/authenticate/authenticate.go b/typedapi/security/authenticate/authenticate.go index db1cc10b28..71e89f0d47 100644 --- a/typedapi/security/authenticate/authenticate.go +++ 
b/typedapi/security/authenticate/authenticate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Authenticate a user. // @@ -88,7 +88,7 @@ func NewAuthenticateFunc(tp elastictransport.Interface) NewAuthenticate { // the user. // If the user cannot be authenticated, this API returns a 401 status code. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate func New(tp elastictransport.Interface) *Authenticate { r := &Authenticate{ transport: tp, diff --git a/typedapi/security/authenticate/response.go b/typedapi/security/authenticate/response.go index b7ba5ed76c..2fd603c82a 100644 --- a/typedapi/security/authenticate/response.go +++ b/typedapi/security/authenticate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package authenticate @@ -26,19 +26,19 @@ import ( // Response holds the response body struct for the package authenticate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/authenticate/SecurityAuthenticateResponse.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/authenticate/SecurityAuthenticateResponse.ts#L24-L42 type Response struct { - ApiKey *types.ApiKey `json:"api_key,omitempty"` - AuthenticationRealm types.RealmInfo `json:"authentication_realm"` - AuthenticationType string `json:"authentication_type"` - Email *string `json:"email,omitempty"` - Enabled bool `json:"enabled"` - FullName *string `json:"full_name,omitempty"` - LookupRealm types.RealmInfo `json:"lookup_realm"` - Metadata types.Metadata `json:"metadata"` - Roles []string `json:"roles"` - Token *types.AuthenticateToken `json:"token,omitempty"` - Username string `json:"username"` + ApiKey *types.AuthenticateApiKey `json:"api_key,omitempty"` + AuthenticationRealm types.RealmInfo `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm types.RealmInfo `json:"lookup_realm"` + Metadata types.Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token *types.AuthenticateToken `json:"token,omitempty"` + Username string `json:"username"` } // NewResponse returns a Response diff --git a/typedapi/security/bulkdeleterole/bulk_delete_role.go b/typedapi/security/bulkdeleterole/bulk_delete_role.go index 3771a25325..2bf7be9f9f 100644 --- 
a/typedapi/security/bulkdeleterole/bulk_delete_role.go +++ b/typedapi/security/bulkdeleterole/bulk_delete_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Bulk delete roles. // @@ -86,7 +86,7 @@ func NewBulkDeleteRoleFunc(tp elastictransport.Interface) NewBulkDeleteRole { // The bulk delete roles API cannot delete roles that are defined in roles // files. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-delete-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role func New(tp elastictransport.Interface) *BulkDeleteRole { r := &BulkDeleteRole{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *BulkDeleteRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -357,10 +355,17 @@ func (r *BulkDeleteRole) Pretty(pretty bool) *BulkDeleteRole { return r } -// Names An array of role names to delete +// An array of role names to delete // API name: names func (r *BulkDeleteRole) Names(names ...string) *BulkDeleteRole { - r.req.Names = names + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range names { + r.req.Names = append(r.req.Names, v) + + } return r } diff --git a/typedapi/security/bulkdeleterole/request.go b/typedapi/security/bulkdeleterole/request.go index 1baa5bd2d4..d7a7fc48e3 100644 --- a/typedapi/security/bulkdeleterole/request.go +++ b/typedapi/security/bulkdeleterole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulkdeleterole @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package bulkdeleterole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/bulk_delete_role/SecurityBulkDeleteRoleRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_delete_role/SecurityBulkDeleteRoleRequest.ts#L23-L50 type Request struct { // Names An array of role names to delete diff --git a/typedapi/security/bulkdeleterole/response.go b/typedapi/security/bulkdeleterole/response.go index 8f5a981e6e..58fd1747a8 100644 --- a/typedapi/security/bulkdeleterole/response.go +++ b/typedapi/security/bulkdeleterole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulkdeleterole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulkdeleterole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L22-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L22-L37 type Response struct { // Deleted Array of deleted roles diff --git a/typedapi/security/bulkputrole/bulk_put_role.go b/typedapi/security/bulkputrole/bulk_put_role.go index bbb3399e44..39db1e5587 100644 --- a/typedapi/security/bulkputrole/bulk_put_role.go +++ b/typedapi/security/bulkputrole/bulk_put_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Bulk create or update roles. // @@ -86,7 +86,7 @@ func NewBulkPutRoleFunc(tp elastictransport.Interface) NewBulkPutRole { // The bulk create or update roles API cannot update roles that are defined in // roles files. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-put-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role func New(tp elastictransport.Interface) *BulkPutRole { r := &BulkPutRole{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *BulkPutRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -357,11 +355,32 @@ func (r *BulkPutRole) Pretty(pretty bool) *BulkPutRole { return r } -// Roles A dictionary of role name to RoleDescriptor objects to add or update +// A dictionary of role name to RoleDescriptor objects to add or update // API name: roles func (r *BulkPutRole) Roles(roles map[string]types.RoleDescriptor) *BulkPutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Roles = roles + return r +} + +func (r *BulkPutRole) AddRole(key string, value types.RoleDescriptorVariant) *BulkPutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.Roles == nil { + r.req.Roles = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.Roles + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.Roles = tmp return r } diff --git a/typedapi/security/bulkputrole/request.go b/typedapi/security/bulkputrole/request.go index bbcc3b0543..77ab7e5290 100644 --- a/typedapi/security/bulkputrole/request.go +++ b/typedapi/security/bulkputrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulkputrole @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package bulkputrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L52 type Request struct { // Roles A dictionary of role name to RoleDescriptor objects to add or update diff --git a/typedapi/security/bulkputrole/response.go b/typedapi/security/bulkputrole/response.go index 2903bd68fa..cde8759050 100644 --- a/typedapi/security/bulkputrole/response.go +++ b/typedapi/security/bulkputrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package bulkputrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulkputrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41 type Response struct { // Created Array of created roles diff --git a/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go b/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go index f867bc08ee..9130eaca4b 100644 --- a/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go +++ b/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go @@ -16,21 +16,52 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Updates the attributes of multiple existing API keys. +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. 
+// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. package bulkupdateapikeys import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +76,10 @@ type BulkUpdateApiKeys struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,14 +100,43 @@ func NewBulkUpdateApiKeysFunc(tp elastictransport.Interface) NewBulkUpdateApiKey } } -// Updates the attributes of multiple existing API keys. +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. 
This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. +// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys func New(tp elastictransport.Interface) *BulkUpdateApiKeys { r := &BulkUpdateApiKeys{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +148,21 @@ func New(tp elastictransport.Interface) *BulkUpdateApiKeys { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *BulkUpdateApiKeys) Raw(raw io.Reader) *BulkUpdateApiKeys { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *BulkUpdateApiKeys) Request(req *Request) *BulkUpdateApiKeys { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *BulkUpdateApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +172,31 @@ func (r *BulkUpdateApiKeys) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for BulkUpdateApiKeys: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +286,7 @@ func (r BulkUpdateApiKeys) Perform(providedCtx context.Context) (*http.Response, } // Do runs the request through the transport, handle the response and returns a bulkupdateapikeys.Response -func (r BulkUpdateApiKeys) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r BulkUpdateApiKeys) IsSuccess(providedCtx context.Context) (bool, error) { +func (r BulkUpdateApiKeys) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +297,46 @@ func (r BulkUpdateApiKeys) IsSuccess(providedCtx context.Context) (bool, error) ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the BulkUpdateApiKeys query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the BulkUpdateApiKeys headers map. 
@@ -231,3 +345,133 @@ func (r *BulkUpdateApiKeys) Header(key, value string) *BulkUpdateApiKeys { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *BulkUpdateApiKeys) ErrorTrace(errortrace bool) *BulkUpdateApiKeys { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *BulkUpdateApiKeys) FilterPath(filterpaths ...string) *BulkUpdateApiKeys { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *BulkUpdateApiKeys) Human(human bool) *BulkUpdateApiKeys { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *BulkUpdateApiKeys) Pretty(pretty bool) *BulkUpdateApiKeys { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Expiration time for the API keys. +// By default, API keys never expire. +// This property can be omitted to leave the value unchanged. 
+// API name: expiration +func (r *BulkUpdateApiKeys) Expiration(duration types.DurationVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// The API key identifiers. +// API name: ids +func (r *BulkUpdateApiKeys) Ids(ids ...string) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = make([]string, len(ids)) + r.req.Ids = ids + + return r +} + +// Arbitrary nested metadata to associate with the API keys. +// Within the `metadata` object, top-level keys beginning with an underscore +// (`_`) are reserved for system usage. +// Any information specified with this parameter fully replaces metadata +// previously associated with the API key. +// API name: metadata +func (r *BulkUpdateApiKeys) Metadata(metadata types.MetadataVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The role descriptors to assign to the API keys. +// An API key's effective permissions are an intersection of its assigned +// privileges and the point-in-time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, supply the `role_descriptors` parameter as an +// empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter. +// The structure of a role descriptor is the same as the request for the create +// API keys API. 
+// API name: role_descriptors +func (r *BulkUpdateApiKeys) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *BulkUpdateApiKeys) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + + r.req.RoleDescriptors = tmp + return r +} diff --git a/typedapi/security/bulkupdateapikeys/request.go b/typedapi/security/bulkupdateapikeys/request.go new file mode 100644 index 0000000000..528d5413f9 --- /dev/null +++ b/typedapi/security/bulkupdateapikeys/request.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package bulkupdateapikeys + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysRequest.ts#L26-L83 +type Request struct { + + // Expiration Expiration time for the API keys. + // By default, API keys never expire. + // This property can be omitted to leave the value unchanged. + Expiration types.Duration `json:"expiration,omitempty"` + // Ids The API key identifiers. + Ids []string `json:"ids"` + // Metadata Arbitrary nested metadata to associate with the API keys. + // Within the `metadata` object, top-level keys beginning with an underscore + // (`_`) are reserved for system usage. + // Any information specified with this parameter fully replaces metadata + // previously associated with the API key. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleDescriptors The role descriptors to assign to the API keys. + // An API key's effective permissions are an intersection of its assigned + // privileges and the point-in-time snapshot of permissions of the owner user. + // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, supply the `role_descriptors` parameter as an + // empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter. + // The structure of a role descriptor is the same as the request for the create + // API keys API. 
+ RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + RoleDescriptors: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Bulkupdateapikeys request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]types.RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + } + } + return nil +} diff --git a/typedapi/security/bulkupdateapikeys/response.go b/typedapi/security/bulkupdateapikeys/response.go new file mode 100644 index 0000000000..7423ae46ad --- /dev/null +++ 
b/typedapi/security/bulkupdateapikeys/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package bulkupdateapikeys + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysResponse.ts#L22-L28 +type Response struct { + Errors *types.BulkError `json:"errors,omitempty"` + Noops []string `json:"noops"` + Updated []string `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/changepassword/change_password.go b/typedapi/security/changepassword/change_password.go index 16cc18dd61..0f7d3005f4 100644 --- a/typedapi/security/changepassword/change_password.go +++ b/typedapi/security/changepassword/change_password.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Change passwords. // @@ -86,7 +86,7 @@ func NewChangePasswordFunc(tp elastictransport.Interface) NewChangePassword { // // Change the passwords of users in the native realm and built-in users. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password func New(tp elastictransport.Interface) *ChangePassword { r := &ChangePassword{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *ChangePassword { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -384,21 +382,30 @@ func (r *ChangePassword) Pretty(pretty bool) *ChangePassword { return r } -// Password The new password value. Passwords must be at least 6 characters long. +// The new password value. Passwords must be at least 6 characters long. // API name: password func (r *ChangePassword) Password(password string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } -// PasswordHash A hash of the new password value. This must be produced using the same +// A hash of the new password value. This must be produced using the same // hashing algorithm as has been configured for password storage. For more // details, // see the explanation of the `xpack.security.authc.password_hashing.algorithm` // setting. 
// API name: password_hash func (r *ChangePassword) PasswordHash(passwordhash string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PasswordHash = &passwordhash diff --git a/typedapi/security/changepassword/request.go b/typedapi/security/changepassword/request.go index abaa030bd3..fd51cff238 100644 --- a/typedapi/security/changepassword/request.go +++ b/typedapi/security/changepassword/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package changepassword @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L65 type Request struct { // Password The new password value. Passwords must be at least 6 characters long. diff --git a/typedapi/security/changepassword/response.go b/typedapi/security/changepassword/response.go index 28d060b59e..8441ad1123 100644 --- a/typedapi/security/changepassword/response.go +++ b/typedapi/security/changepassword/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package changepassword // Response holds the response body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/clearapikeycache/clear_api_key_cache.go b/typedapi/security/clearapikeycache/clear_api_key_cache.go index b14fe2dbeb..cd3fdd0c39 100644 --- a/typedapi/security/clearapikeycache/clear_api_key_cache.go +++ b/typedapi/security/clearapikeycache/clear_api_key_cache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear the API key cache. // @@ -86,7 +86,7 @@ func NewClearApiKeyCacheFunc(tp elastictransport.Interface) NewClearApiKeyCache // The cache is also automatically cleared on state changes of the security // index. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache func New(tp elastictransport.Interface) *ClearApiKeyCache { r := &ClearApiKeyCache{ transport: tp, diff --git a/typedapi/security/clearapikeycache/response.go b/typedapi/security/clearapikeycache/response.go index 68c23e4e45..76af867cfe 100644 --- a/typedapi/security/clearapikeycache/response.go +++ b/typedapi/security/clearapikeycache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearapikeycache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearapikeycache // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedprivileges/clear_cached_privileges.go b/typedapi/security/clearcachedprivileges/clear_cached_privileges.go index e0becc206d..b6395eccc0 100644 --- a/typedapi/security/clearcachedprivileges/clear_cached_privileges.go +++ b/typedapi/security/clearcachedprivileges/clear_cached_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear the privileges cache. // @@ -86,7 +86,7 @@ func NewClearCachedPrivilegesFunc(tp elastictransport.Interface) NewClearCachedP // The cache is also automatically cleared for applications that have their // privileges updated. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges func New(tp elastictransport.Interface) *ClearCachedPrivileges { r := &ClearCachedPrivileges{ transport: tp, @@ -300,7 +300,9 @@ func (r *ClearCachedPrivileges) Header(key, value string) *ClearCachedPrivileges return r } -// Application A comma-separated list of application names +// Application A comma-separated list of applications. +// To clear all applications, use an asterisk (`*`). +// It does not support other wildcard patterns. // API Name: application func (r *ClearCachedPrivileges) _application(application string) *ClearCachedPrivileges { r.paramSet |= applicationMask diff --git a/typedapi/security/clearcachedprivileges/response.go b/typedapi/security/clearcachedprivileges/response.go index e22e938f3b..7296819140 100644 --- a/typedapi/security/clearcachedprivileges/response.go +++ b/typedapi/security/clearcachedprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcachedprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedrealms/clear_cached_realms.go b/typedapi/security/clearcachedrealms/clear_cached_realms.go index d6ff1b2e2c..8e382868b1 100644 --- a/typedapi/security/clearcachedrealms/clear_cached_realms.go +++ b/typedapi/security/clearcachedrealms/clear_cached_realms.go @@ -16,12 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear the user cache. // -// Evict users from the user cache. You can completely clear the cache or evict -// specific users. +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. 
+// For more information, refer to the documentation about controlling the user +// cache. package clearcachedrealms import ( @@ -81,10 +87,16 @@ func NewClearCachedRealmsFunc(tp elastictransport.Interface) NewClearCachedRealm // Clear the user cache. // -// Evict users from the user cache. You can completely clear the cache or evict -// specific users. +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. +// For more information, refer to the documentation about controlling the user +// cache. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms func New(tp elastictransport.Interface) *ClearCachedRealms { r := &ClearCachedRealms{ transport: tp, @@ -298,7 +310,9 @@ func (r *ClearCachedRealms) Header(key, value string) *ClearCachedRealms { return r } -// Realms Comma-separated list of realms to clear +// Realms A comma-separated list of realms. +// To clear all realms, use an asterisk (`*`). +// It does not support other wildcard patterns. // API Name: realms func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { r.paramSet |= realmsMask @@ -307,7 +321,9 @@ func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { return r } -// Usernames Comma-separated list of usernames to clear from the cache +// Usernames A comma-separated list of the users to clear from the cache. +// If you do not specify this parameter, the API evicts all users from the user +// cache. 
// API name: usernames func (r *ClearCachedRealms) Usernames(usernames ...string) *ClearCachedRealms { tmp := []string{} diff --git a/typedapi/security/clearcachedrealms/response.go b/typedapi/security/clearcachedrealms/response.go index fb2cc44ee6..961026796f 100644 --- a/typedapi/security/clearcachedrealms/response.go +++ b/typedapi/security/clearcachedrealms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcachedrealms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedrealms // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedroles/clear_cached_roles.go b/typedapi/security/clearcachedroles/clear_cached_roles.go index 91e1228aae..20e0411ceb 100644 --- a/typedapi/security/clearcachedroles/clear_cached_roles.go +++ b/typedapi/security/clearcachedroles/clear_cached_roles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear the roles cache. 
// @@ -82,7 +82,7 @@ func NewClearCachedRolesFunc(tp elastictransport.Interface) NewClearCachedRoles // // Evict roles from the native role cache. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles func New(tp elastictransport.Interface) *ClearCachedRoles { r := &ClearCachedRoles{ transport: tp, @@ -296,7 +296,9 @@ func (r *ClearCachedRoles) Header(key, value string) *ClearCachedRoles { return r } -// Name Role name +// Name A comma-separated list of roles to evict from the role cache. +// To evict all roles, use an asterisk (`*`). +// It does not support other wildcard patterns. // API Name: name func (r *ClearCachedRoles) _name(name string) *ClearCachedRoles { r.paramSet |= nameMask diff --git a/typedapi/security/clearcachedroles/response.go b/typedapi/security/clearcachedroles/response.go index 83b01b831e..cf8b032ab6 100644 --- a/typedapi/security/clearcachedroles/response.go +++ b/typedapi/security/clearcachedroles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcachedroles @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedroles // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go b/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go index 9148cfefc1..9279ee6e79 100644 --- a/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go +++ b/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Clear service account token caches. // // Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. 
+// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. package clearcachedservicetokens import ( @@ -91,8 +100,17 @@ func NewClearCachedServiceTokensFunc(tp elastictransport.Interface) NewClearCach // Clear service account token caches. // // Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens func New(tp elastictransport.Interface) *ClearCachedServiceTokens { r := &ClearCachedServiceTokens{ transport: tp, @@ -322,7 +340,7 @@ func (r *ClearCachedServiceTokens) Header(key, value string) *ClearCachedService return r } -// Namespace An identifier for the namespace +// Namespace The namespace, which is a top-level grouping of service accounts. // API Name: namespace func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServiceTokens { r.paramSet |= namespaceMask @@ -331,7 +349,7 @@ func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServ return r } -// Service An identifier for the service name +// Service The name of the service, which must be unique within its namespace. 
// API Name: service func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceTokens { r.paramSet |= serviceMask @@ -340,7 +358,10 @@ func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceT return r } -// Name A comma-separated list of service token names +// Name A comma-separated list of token names to evict from the service account token +// caches. +// Use a wildcard (`*`) to evict all tokens that belong to a service account. +// It does not support other wildcard patterns. // API Name: name func (r *ClearCachedServiceTokens) _name(name string) *ClearCachedServiceTokens { r.paramSet |= nameMask diff --git a/typedapi/security/clearcachedservicetokens/response.go b/typedapi/security/clearcachedservicetokens/response.go index cdc99ccd93..d60b0152ce 100644 --- a/typedapi/security/clearcachedservicetokens/response.go +++ b/typedapi/security/clearcachedservicetokens/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcachedservicetokens @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedservicetokens // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/createapikey/create_api_key.go b/typedapi/security/createapikey/create_api_key.go index d5eda2b277..d59ecec99b 100644 --- a/typedapi/security/createapikey/create_api_key.go +++ b/typedapi/security/createapikey/create_api_key.go @@ -16,17 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create an API key. // // Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. // If applicable, it also returns expiration information for the API key in // milliseconds. +// // NOTE: By default, API keys never expire. 
You can specify expiration // information when you create the API keys. +// +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. package createapikey import ( @@ -85,14 +96,25 @@ func NewCreateApiKeyFunc(tp elastictransport.Interface) NewCreateApiKey { // Create an API key. // // Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. // If applicable, it also returns expiration information for the API key in // milliseconds. +// // NOTE: By default, API keys never expire. You can specify expiration // information when you create the API keys. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key func New(tp elastictransport.Interface) *CreateApiKey { r := &CreateApiKey{ transport: tp, @@ -100,8 +122,6 @@ func New(tp elastictransport.Interface) *CreateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,44 +383,90 @@ func (r *CreateApiKey) Pretty(pretty bool) *CreateApiKey { return r } -// Expiration Expiration time for the API key. By default, API keys never expire. +// The expiration time for the API key. 
+// By default, API keys never expire. // API name: expiration -func (r *CreateApiKey) Expiration(duration types.Duration) *CreateApiKey { - r.req.Expiration = duration +func (r *CreateApiKey) Expiration(duration types.DurationVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. It supports +// Arbitrary metadata that you want to associate with the API key. It supports // nested data structure. Within the metadata object, keys beginning with `_` // are reserved for system usage. // API name: metadata -func (r *CreateApiKey) Metadata(metadata types.Metadata) *CreateApiKey { - r.req.Metadata = metadata +func (r *CreateApiKey) Metadata(metadata types.MetadataVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// Name Specifies the name for this API key. +// A name for the API key. // API name: name func (r *CreateApiKey) Name(name string) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. -// When it is not specified or is an empty array, then the API key will have a -// point in time snapshot of permissions of the authenticated user. If you -// supply role descriptors then the resultant permissions would be an -// intersection of API keys permissions and authenticated user’s permissions -// thereby limiting the access scope for API keys. The structure of role -// descriptor is the same as the request for create role API. For more details, -// see create or update roles API. 
+// An array of role descriptors for this API key. +// When it is not specified or it is an empty array, the API key will have a +// point in time snapshot of permissions of the authenticated user. +// If you supply role descriptors, the resultant permissions are an intersection +// of API keys permissions and the authenticated user's permissions thereby +// limiting the access scope for API keys. +// The structure of role descriptor is the same as the request for the create +// role API. +// For more details, refer to the create or update roles API. +// +// NOTE: Due to the way in which this permission intersection is calculated, it +// is not possible to create an API key that is a child of another API key, +// unless the derived key is created without any privileges. +// In this case, you must explicitly specify a role descriptor with no +// privileges. +// The derived API key can be used for authentication; it will not have +// authority to call Elasticsearch APIs. // API name: role_descriptors func (r *CreateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *CreateApiKey { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *CreateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.RoleDescriptors = tmp return r } diff --git a/typedapi/security/createapikey/request.go b/typedapi/security/createapikey/request.go index 0c0e6f144e..72d648849e 100644 --- a/typedapi/security/createapikey/request.go +++ 
b/typedapi/security/createapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createapikey @@ -32,25 +32,35 @@ import ( // Request holds the request body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L86 type Request struct { - // Expiration Expiration time for the API key. By default, API keys never expire. + // Expiration The expiration time for the API key. + // By default, API keys never expire. Expiration types.Duration `json:"expiration,omitempty"` // Metadata Arbitrary metadata that you want to associate with the API key. It supports // nested data structure. Within the metadata object, keys beginning with `_` // are reserved for system usage. Metadata types.Metadata `json:"metadata,omitempty"` - // Name Specifies the name for this API key. + // Name A name for the API key. Name *string `json:"name,omitempty"` - // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. - // When it is not specified or is an empty array, then the API key will have a - // point in time snapshot of permissions of the authenticated user. If you - // supply role descriptors then the resultant permissions would be an - // intersection of API keys permissions and authenticated user’s permissions - // thereby limiting the access scope for API keys. 
The structure of role - // descriptor is the same as the request for create role API. For more details, - // see create or update roles API. + // RoleDescriptors An array of role descriptors for this API key. + // When it is not specified or it is an empty array, the API key will have a + // point in time snapshot of permissions of the authenticated user. + // If you supply role descriptors, the resultant permissions are an intersection + // of API keys permissions and the authenticated user's permissions thereby + // limiting the access scope for API keys. + // The structure of role descriptor is the same as the request for the create + // role API. + // For more details, refer to the create or update roles API. + // + // NOTE: Due to the way in which this permission intersection is calculated, it + // is not possible to create an API key that is a child of another API key, + // unless the derived key is created without any privileges. + // In this case, you must explicitly specify a role descriptor with no + // privileges. + // The derived API key can be used for authentication; it will not have + // authority to call Elasticsearch APIs. RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` } diff --git a/typedapi/security/createapikey/response.go b/typedapi/security/createapikey/response.go index 51236077ba..f9dda525bc 100644 --- a/typedapi/security/createapikey/response.go +++ b/typedapi/security/createapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createapikey // Response holds the response body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 type Response struct { // ApiKey Generated API key. diff --git a/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go b/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go index 026f7335bb..d5c9fa69e1 100644 --- a/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go +++ b/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a cross-cluster API key. // @@ -131,7 +131,7 @@ func NewCreateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewCreateCro // Attempting to update them with the update REST API key API or the bulk update // REST API keys API will result in an error. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { r := &CreateCrossClusterApiKey{ transport: tp, @@ -139,8 +139,6 @@ func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -394,7 +392,7 @@ func (r *CreateCrossClusterApiKey) Pretty(pretty bool) *CreateCrossClusterApiKey return r } -// Access The access to be granted to this API key. +// The access to be granted to this API key. // The access is composed of permissions for cross-cluster search and // cross-cluster replication. // At least one of them must be specified. @@ -404,36 +402,55 @@ func (r *CreateCrossClusterApiKey) Pretty(pretty bool) *CreateCrossClusterApiKey // The creation process automatically converts the access specification to a // role descriptor which has relevant privileges assigned accordingly. // API name: access -func (r *CreateCrossClusterApiKey) Access(access *types.Access) *CreateCrossClusterApiKey { +func (r *CreateCrossClusterApiKey) Access(access types.AccessVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Access = *access + r.req.Access = *access.AccessCaster() return r } -// Expiration Expiration time for the API key. +// Expiration time for the API key. // By default, API keys never expire. 
// API name: expiration -func (r *CreateCrossClusterApiKey) Expiration(duration types.Duration) *CreateCrossClusterApiKey { - r.req.Expiration = duration +func (r *CreateCrossClusterApiKey) Expiration(duration types.DurationVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. +// Arbitrary metadata that you want to associate with the API key. // It supports nested data structure. // Within the metadata object, keys beginning with `_` are reserved for system // usage. // API name: metadata -func (r *CreateCrossClusterApiKey) Metadata(metadata types.Metadata) *CreateCrossClusterApiKey { - r.req.Metadata = metadata +func (r *CreateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// Name Specifies the name for this API key. +// Specifies the name for this API key. // API name: name func (r *CreateCrossClusterApiKey) Name(name string) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = name return r diff --git a/typedapi/security/createcrossclusterapikey/request.go b/typedapi/security/createcrossclusterapikey/request.go index 978d6f7320..e6a459275e 100644 --- a/typedapi/security/createcrossclusterapikey/request.go +++ b/typedapi/security/createcrossclusterapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createcrossclusterapikey @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package createcrossclusterapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyRequest.ts#L25-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyRequest.ts#L25-L80 type Request struct { // Access The access to be granted to this API key. diff --git a/typedapi/security/createcrossclusterapikey/response.go b/typedapi/security/createcrossclusterapikey/response.go index 197196cc67..cb6195f0c6 100644 --- a/typedapi/security/createcrossclusterapikey/response.go +++ b/typedapi/security/createcrossclusterapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createcrossclusterapikey // Response holds the response body struct for the package createcrossclusterapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyResponse.ts#L23-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyResponse.ts#L23-L48 type Response struct { // ApiKey Generated API key. diff --git a/typedapi/security/createservicetoken/create_service_token.go b/typedapi/security/createservicetoken/create_service_token.go index 48aba065a1..0b55031e38 100644 --- a/typedapi/security/createservicetoken/create_service_token.go +++ b/typedapi/security/createservicetoken/create_service_token.go @@ -16,12 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a service account token. // // Create a service accounts token for access without requiring basic // authentication. +// +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. package createservicetoken import ( @@ -93,7 +96,10 @@ func NewCreateServiceTokenFunc(tp elastictransport.Interface) NewCreateServiceTo // Create a service accounts token for access without requiring basic // authentication. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token func New(tp elastictransport.Interface) *CreateServiceToken { r := &CreateServiceToken{ transport: tp, @@ -344,7 +350,7 @@ func (r *CreateServiceToken) Header(key, value string) *CreateServiceToken { return r } -// Namespace An identifier for the namespace +// Namespace The name of the namespace, which is a top-level grouping of service accounts. // API Name: namespace func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { r.paramSet |= namespaceMask @@ -353,7 +359,7 @@ func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { return r } -// Service An identifier for the service name +// Service The name of the service. // API Name: service func (r *CreateServiceToken) _service(service string) *CreateServiceToken { r.paramSet |= serviceMask @@ -362,7 +368,18 @@ func (r *CreateServiceToken) _service(service string) *CreateServiceToken { return r } -// Name An identifier for the token name +// Name The name for the service account token. +// If omitted, a random name will be generated. +// +// Token names must be at least one and no more than 256 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and +// underscores (`_`), but cannot begin with an underscore. +// +// NOTE: Token names must be unique in the context of the associated service +// account. +// They must also be globally unique with their fully qualified names, which are +// comprised of the service account principal and token name, such as +// `//`. 
// API Name: name func (r *CreateServiceToken) Name(name string) *CreateServiceToken { r.paramSet |= nameMask diff --git a/typedapi/security/createservicetoken/response.go b/typedapi/security/createservicetoken/response.go index 108c06c2b0..b1230ca999 100644 --- a/typedapi/security/createservicetoken/response.go +++ b/typedapi/security/createservicetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createservicetoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package createservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L30 type Response struct { Created bool `json:"created"` Token types.ServiceToken `json:"token"` diff --git a/typedapi/security/delegatepki/delegate_pki.go b/typedapi/security/delegatepki/delegate_pki.go new file mode 100644 index 0000000000..f7b8d37be5 --- /dev/null +++ b/typedapi/security/delegatepki/delegate_pki.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Delegate PKI authentication. +// +// This API implements the exchange of an X509Certificate chain for an +// Elasticsearch access token. +// The certificate chain is validated, according to RFC 5280, by sequentially +// considering the trust configuration of every installed PKI realm that has +// `delegation.enabled` set to `true`. +// A successfully trusted client certificate is also subject to the validation +// of the subject distinguished name according to the `username_pattern` of the +// respective realm. +// +// This API is called by smart and trusted proxies, such as Kibana, which +// terminate the user's TLS session but still want to authenticate the user by +// using a PKI realm — as if the user connected directly to Elasticsearch. +// +// IMPORTANT: The association between the subject public key in the target +// certificate and the corresponding private key is not validated. +// This is part of the TLS authentication process and it is delegated to the +// proxy that calls this API. +// The proxy is trusted to have performed the TLS authentication and this API +// translates that authentication into an Elasticsearch access token.
+package delegatepki + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DelegatePki struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelegatePki type alias for index. +type NewDelegatePki func() *DelegatePki + +// NewDelegatePkiFunc returns a new instance of DelegatePki with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDelegatePkiFunc(tp elastictransport.Interface) NewDelegatePki { + return func() *DelegatePki { + n := New(tp) + + return n + } +} + +// Delegate PKI authentication. +// +// This API implements the exchange of an X509Certificate chain for an +// Elasticsearch access token. +// The certificate chain is validated, according to RFC 5280, by sequentially +// considering the trust configuration of every installed PKI realm that has +// `delegation.enabled` set to `true`. +// A successfully trusted client certificate is also subject to the validation +// of the subject distinguished name according to the `username_pattern` of the +// respective realm. +// +// This API is called by smart and trusted proxies, such as Kibana, which +// terminate the user's TLS session but still want to authenticate the user by +// using a PKI realm — as if the user connected directly to Elasticsearch.
+// +// IMPORTANT: The association between the subject public key in the target +// certificate and the corresponding private key is not validated. +// This is part of the TLS authentication process and it is delegated to the +// proxy that calls this API. +// The proxy is trusted to have performed the TLS authentication and this API +// translates that authentication into an Elasticsearch access token. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki +func New(tp elastictransport.Interface) *DelegatePki { + r := &DelegatePki{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *DelegatePki) Raw(raw io.Reader) *DelegatePki { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *DelegatePki) Request(req *Request) *DelegatePki { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DelegatePki) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for DelegatePki: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("delegate_pki") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DelegatePki) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DelegatePki query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delegatepki.Response +func (r DelegatePki) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode 
< 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the DelegatePki headers map. +func (r *DelegatePki) Header(key, value string) *DelegatePki { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DelegatePki) ErrorTrace(errortrace bool) *DelegatePki { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DelegatePki) FilterPath(filterpaths ...string) *DelegatePki { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines.
+// API name: human +func (r *DelegatePki) Human(human bool) *DelegatePki { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging. +// API name: pretty +func (r *DelegatePki) Pretty(pretty bool) *DelegatePki { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The X509Certificate chain, which is represented as an ordered string array. +// Each string in the array is a base64-encoded (Section 4 of RFC4648 - not +// base64url-encoded) of the certificate's DER encoding. +// +// The first element is the target certificate that contains the subject +// distinguished name that is requesting access. +// This may be followed by additional certificates; each subsequent certificate +// is used to certify the previous one. +// API name: x509_certificate_chain +func (r *DelegatePki) X509CertificateChain(x509certificatechains ...string) *DelegatePki { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range x509certificatechains { + + r.req.X509CertificateChain = append(r.req.X509CertificateChain, v) + + } + return r +} diff --git a/typedapi/security/delegatepki/request.go b/typedapi/security/delegatepki/request.go new file mode 100644 index 0000000000..00141848fd --- /dev/null +++ b/typedapi/security/delegatepki/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package delegatepki + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delegate_pki/SecurityDelegatePkiRequest.ts#L22-L57 +type Request struct { + + // X509CertificateChain The X509Certificate chain, which is represented as an ordered string array. + // Each string in the array is a base64-encoded (Section 4 of RFC4648 - not + // base64url-encoded) of the certificate's DER encoding. + // + // The first element is the target certificate that contains the subject + // distinguished name that is requesting access. + // This may be followed by additional certificates; each subsequent certificate + // is used to certify the previous one. 
+ X509CertificateChain []string `json:"x509_certificate_chain"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Delegatepki request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/delegatepki/response.go b/typedapi/security/delegatepki/response.go new file mode 100644 index 0000000000..e81715023c --- /dev/null +++ b/typedapi/security/delegatepki/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package delegatepki + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L24-L41 +type Response struct { + + // AccessToken An access token associated with the subject distinguished name of the + // client's certificate. + AccessToken string `json:"access_token"` + Authentication *types.Authentication `json:"authentication,omitempty"` + // ExpiresIn The amount of time (in seconds) before the token expires. + ExpiresIn int64 `json:"expires_in"` + // Type The type of token. + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/deleteprivileges/delete_privileges.go b/typedapi/security/deleteprivileges/delete_privileges.go index e804102136..35f49f982b 100644 --- a/typedapi/security/deleteprivileges/delete_privileges.go +++ b/typedapi/security/deleteprivileges/delete_privileges.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. 
package deleteprivileges import ( @@ -84,7 +91,14 @@ func NewDeletePrivilegesFunc(tp elastictransport.Interface) NewDeletePrivileges // Delete application privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges func New(tp elastictransport.Interface) *DeletePrivileges { r := &DeletePrivileges{ transport: tp, @@ -302,7 +316,8 @@ func (r *DeletePrivileges) Header(key, value string) *DeletePrivileges { return r } -// Application Application name +// Application The name of the application. +// Application privileges are always associated with exactly one application. // API Name: application func (r *DeletePrivileges) _application(application string) *DeletePrivileges { r.paramSet |= applicationMask @@ -311,7 +326,7 @@ func (r *DeletePrivileges) _application(application string) *DeletePrivileges { return r } -// Name Privilege name +// Name The name of the privilege. // API Name: name func (r *DeletePrivileges) _name(name string) *DeletePrivileges { r.paramSet |= nameMask diff --git a/typedapi/security/deleteprivileges/response.go b/typedapi/security/deleteprivileges/response.go index 5f3b5918aa..770957045d 100644 --- a/typedapi/security/deleteprivileges/response.go +++ b/typedapi/security/deleteprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deleteprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26 type Response map[string]map[string]types.FoundStatus diff --git a/typedapi/security/deleterole/delete_role.go b/typedapi/security/deleterole/delete_role.go index 0005fc942d..19985045b2 100644 --- a/typedapi/security/deleterole/delete_role.go +++ b/typedapi/security/deleterole/delete_role.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete roles. // // Delete roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. package deleterole import ( @@ -82,8 +85,11 @@ func NewDeleteRoleFunc(tp elastictransport.Interface) NewDeleteRole { // Delete roles. // // Delete roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role func New(tp elastictransport.Interface) *DeleteRole { r := &DeleteRole{ transport: tp, @@ -295,7 +301,7 @@ func (r *DeleteRole) Header(key, value string) *DeleteRole { return r } -// Name Role name +// Name The name of the role. // API Name: name func (r *DeleteRole) _name(name string) *DeleteRole { r.paramSet |= nameMask diff --git a/typedapi/security/deleterole/response.go b/typedapi/security/deleterole/response.go index 8c807eb696..13c5d7c317 100644 --- a/typedapi/security/deleterole/response.go +++ b/typedapi/security/deleterole/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleterole // Response holds the response body struct for the package deleterole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L28 type Response struct { + + // Found If the role is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. 
Found bool `json:"found"` } diff --git a/typedapi/security/deleterolemapping/delete_role_mapping.go b/typedapi/security/deleterolemapping/delete_role_mapping.go index a8d6f4230e..cd9d460474 100644 --- a/typedapi/security/deleterolemapping/delete_role_mapping.go +++ b/typedapi/security/deleterolemapping/delete_role_mapping.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. package deleterolemapping import ( @@ -79,7 +85,13 @@ func NewDeleteRoleMappingFunc(tp elastictransport.Interface) NewDeleteRoleMappin // Delete role mappings. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping func New(tp elastictransport.Interface) *DeleteRoleMapping { r := &DeleteRoleMapping{ transport: tp, @@ -291,7 +303,9 @@ func (r *DeleteRoleMapping) Header(key, value string) *DeleteRoleMapping { return r } -// Name Role-mapping name +// Name The distinct name that identifies the role mapping. 
+// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. // API Name: name func (r *DeleteRoleMapping) _name(name string) *DeleteRoleMapping { r.paramSet |= nameMask diff --git a/typedapi/security/deleterolemapping/response.go b/typedapi/security/deleterolemapping/response.go index 15d66f6f9a..f630f4bb9f 100644 --- a/typedapi/security/deleterolemapping/response.go +++ b/typedapi/security/deleterolemapping/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleterolemapping // Response holds the response body struct for the package deleterolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L28 type Response struct { + + // Found If the mapping is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. Found bool `json:"found"` } diff --git a/typedapi/security/deleteservicetoken/delete_service_token.go b/typedapi/security/deleteservicetoken/delete_service_token.go index fa9d8033d2..cc612af863 100644 --- a/typedapi/security/deleteservicetoken/delete_service_token.go +++ b/typedapi/security/deleteservicetoken/delete_service_token.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete service account tokens. // @@ -93,7 +93,7 @@ func NewDeleteServiceTokenFunc(tp elastictransport.Interface) NewDeleteServiceTo // // Delete service account tokens for a service in a specified namespace. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token func New(tp elastictransport.Interface) *DeleteServiceToken { r := &DeleteServiceToken{ transport: tp, @@ -321,7 +321,7 @@ func (r *DeleteServiceToken) Header(key, value string) *DeleteServiceToken { return r } -// Namespace An identifier for the namespace +// Namespace The namespace, which is a top-level grouping of service accounts. // API Name: namespace func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { r.paramSet |= namespaceMask @@ -330,7 +330,7 @@ func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { return r } -// Service An identifier for the service name +// Service The service name. // API Name: service func (r *DeleteServiceToken) _service(service string) *DeleteServiceToken { r.paramSet |= serviceMask @@ -339,7 +339,7 @@ func (r *DeleteServiceToken) _service(service string) *DeleteServiceToken { return r } -// Name An identifier for the token name +// Name The name of the service account token. 
// API Name: name func (r *DeleteServiceToken) _name(name string) *DeleteServiceToken { r.paramSet |= nameMask diff --git a/typedapi/security/deleteservicetoken/response.go b/typedapi/security/deleteservicetoken/response.go index 25bede4ffc..d09a93b7a6 100644 --- a/typedapi/security/deleteservicetoken/response.go +++ b/typedapi/security/deleteservicetoken/response.go @@ -16,14 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteservicetoken // Response holds the response body struct for the package deleteservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L28 type Response struct { + + // Found If the service account token is successfully deleted, the request returns + // `{"found": true}`. + // Otherwise, the response will have status code 404 and `found` is set to + // `false`. Found bool `json:"found"` } diff --git a/typedapi/security/deleteuser/delete_user.go b/typedapi/security/deleteuser/delete_user.go index 2673c1c834..59e516ff7f 100644 --- a/typedapi/security/deleteuser/delete_user.go +++ b/typedapi/security/deleteuser/delete_user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete users. 
// @@ -83,7 +83,7 @@ func NewDeleteUserFunc(tp elastictransport.Interface) NewDeleteUser { // // Delete users from the native realm. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user func New(tp elastictransport.Interface) *DeleteUser { r := &DeleteUser{ transport: tp, @@ -295,7 +295,7 @@ func (r *DeleteUser) Header(key, value string) *DeleteUser { return r } -// Username username +// Username An identifier for the user. // API Name: username func (r *DeleteUser) _username(username string) *DeleteUser { r.paramSet |= usernameMask diff --git a/typedapi/security/deleteuser/response.go b/typedapi/security/deleteuser/response.go index 507a4a4482..3ffcfa4a4c 100644 --- a/typedapi/security/deleteuser/response.go +++ b/typedapi/security/deleteuser/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteuser // Response holds the response body struct for the package deleteuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L28 type Response struct { + + // Found If the user is successfully deleted, the request returns `{"found": true}`. + // Otherwise, `found` is set to `false`. 
Found bool `json:"found"` } diff --git a/typedapi/security/disableuser/disable_user.go b/typedapi/security/disableuser/disable_user.go index 9073b19412..9bf1407f8a 100644 --- a/typedapi/security/disableuser/disable_user.go +++ b/typedapi/security/disableuser/disable_user.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Disable users. // // Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. package disableuser import ( @@ -82,8 +84,10 @@ func NewDisableUserFunc(tp elastictransport.Interface) NewDisableUser { // Disable users. // // Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user func New(tp elastictransport.Interface) *DisableUser { r := &DisableUser{ transport: tp, @@ -297,7 +301,7 @@ func (r *DisableUser) Header(key, value string) *DisableUser { return r } -// Username The username of the user to disable +// Username An identifier for the user. // API Name: username func (r *DisableUser) _username(username string) *DisableUser { r.paramSet |= usernameMask diff --git a/typedapi/security/disableuser/response.go b/typedapi/security/disableuser/response.go index 1e4b7c5684..6d3d689f9f 100644 --- a/typedapi/security/disableuser/response.go +++ b/typedapi/security/disableuser/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package disableuser // Response holds the response body struct for the package disableuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/disableuserprofile/disable_user_profile.go b/typedapi/security/disableuserprofile/disable_user_profile.go index 16dd59a273..3d7819f813 100644 --- a/typedapi/security/disableuserprofile/disable_user_profile.go +++ b/typedapi/security/disableuserprofile/disable_user_profile.go @@ -16,11 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Disable a user profile. // // Disable user profiles so that they are not visible in user profile searches. // +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches.
You can use the disable user profile API to disable a +// user profile so it’s not visible in these searches. +// To re-enable a disabled user profile, use the enable user profile API. package disableuserprofile import ( @@ -83,7 +94,18 @@ func NewDisableUserProfileFunc(tp elastictransport.Interface) NewDisableUserProf // // Disable user profiles so that they are not visible in user profile searches. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. You can use the disable user profile API to disable a +// user profile so it’s not visible in these searches. +// To re-enable a disabled user profile, use the enable user profile API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile func New(tp elastictransport.Interface) *DisableUserProfile { r := &DisableUserProfile{ transport: tp, @@ -307,9 +329,10 @@ func (r *DisableUserProfile) _uid(uid string) *DisableUserProfile { } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', it does nothing with refreshes.
// API name: refresh func (r *DisableUserProfile) Refresh(refresh refresh.Refresh) *DisableUserProfile { r.values.Set("refresh", refresh.String()) diff --git a/typedapi/security/disableuserprofile/response.go b/typedapi/security/disableuserprofile/response.go index 0558898fed..3fd8c27f74 100644 --- a/typedapi/security/disableuserprofile/response.go +++ b/typedapi/security/disableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package disableuserprofile // Response holds the response body struct for the package disableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/disable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/disable_user_profile/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/enableuser/enable_user.go b/typedapi/security/enableuser/enable_user.go index 269491b6a0..c0ebae3262 100644 --- a/typedapi/security/enableuser/enable_user.go +++ b/typedapi/security/enableuser/enable_user.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Enable users. // // Enable users in the native realm. +// By default, when you create users, they are enabled. 
package enableuser import ( @@ -82,8 +83,9 @@ func NewEnableUserFunc(tp elastictransport.Interface) NewEnableUser { // Enable users. // // Enable users in the native realm. +// By default, when you create users, they are enabled. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user func New(tp elastictransport.Interface) *EnableUser { r := &EnableUser{ transport: tp, @@ -297,7 +299,7 @@ func (r *EnableUser) Header(key, value string) *EnableUser { return r } -// Username The username of the user to enable +// Username An identifier for the user. // API Name: username func (r *EnableUser) _username(username string) *EnableUser { r.paramSet |= usernameMask diff --git a/typedapi/security/enableuser/response.go b/typedapi/security/enableuser/response.go index c7e99226c2..339c4f8664 100644 --- a/typedapi/security/enableuser/response.go +++ b/typedapi/security/enableuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package enableuser // Response holds the response body struct for the package enableuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/enableuserprofile/enable_user_profile.go b/typedapi/security/enableuserprofile/enable_user_profile.go index f6e9821ef8..831ac9385c 100644 --- a/typedapi/security/enableuserprofile/enable_user_profile.go +++ b/typedapi/security/enableuserprofile/enable_user_profile.go @@ -16,11 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Enable a user profile. // // Enable user profiles to make them visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. 
+// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. package enableuserprofile import ( @@ -83,7 +94,18 @@ func NewEnableUserProfileFunc(tp elastictransport.Interface) NewEnableUserProfil // // Enable user profiles to make them visible in user profile searches. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. +// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile func New(tp elastictransport.Interface) *EnableUserProfile { r := &EnableUserProfile{ transport: tp, @@ -297,7 +319,7 @@ func (r *EnableUserProfile) Header(key, value string) *EnableUserProfile { return r } -// Uid Unique identifier for the user profile. +// Uid A unique identifier for the user profile. // API Name: uid func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { r.paramSet |= uidMask @@ -307,9 +329,10 @@ func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. 
+// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. // API name: refresh func (r *EnableUserProfile) Refresh(refresh refresh.Refresh) *EnableUserProfile { r.values.Set("refresh", refresh.String()) diff --git a/typedapi/security/enableuserprofile/response.go b/typedapi/security/enableuserprofile/response.go index f83a871621..2c27912dee 100644 --- a/typedapi/security/enableuserprofile/response.go +++ b/typedapi/security/enableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package enableuserprofile // Response holds the response body struct for the package enableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/enable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/enable_user_profile/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/enrollkibana/enroll_kibana.go b/typedapi/security/enrollkibana/enroll_kibana.go index 89c7cb2d5a..ed06d5b7b3 100644 --- a/typedapi/security/enrollkibana/enroll_kibana.go +++ b/typedapi/security/enrollkibana/enroll_kibana.go @@ -16,12 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Enroll Kibana. 
// // Enable a Kibana instance to configure itself for communication with a secured // Elasticsearch cluster. +// +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. package enrollkibana import ( @@ -76,7 +80,11 @@ func NewEnrollKibanaFunc(tp elastictransport.Interface) NewEnrollKibana { // Enable a Kibana instance to configure itself for communication with a secured // Elasticsearch cluster. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana func New(tp elastictransport.Interface) *EnrollKibana { r := &EnrollKibana{ transport: tp, diff --git a/typedapi/security/enrollkibana/response.go b/typedapi/security/enrollkibana/response.go index 1bb44c3f24..a1ef574ea1 100644 --- a/typedapi/security/enrollkibana/response.go +++ b/typedapi/security/enrollkibana/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package enrollkibana @@ -26,8 +26,13 @@ import ( // Response holds the response body struct for the package enrollkibana // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/enroll_kibana/Response.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/enroll_kibana/Response.ts#L20-L29 type Response struct { + + // HttpCa The CA certificate used to sign the node certificates that Elasticsearch uses + // for TLS on the HTTP layer. + // The certificate is returned as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. HttpCa string `json:"http_ca"` Token types.KibanaToken `json:"token"` } diff --git a/typedapi/security/enrollnode/enroll_node.go b/typedapi/security/enrollnode/enroll_node.go index b686b470aa..567d9b46a1 100644 --- a/typedapi/security/enrollnode/enroll_node.go +++ b/typedapi/security/enrollnode/enroll_node.go @@ -16,12 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Enroll a node. // // Enroll a new node to allow it to join an existing cluster with security // features enabled. +// +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. 
+// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. package enrollnode import ( @@ -76,7 +83,14 @@ func NewEnrollNodeFunc(tp elastictransport.Interface) NewEnrollNode { // Enroll a new node to allow it to join an existing cluster with security // features enabled. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. +// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node func New(tp elastictransport.Interface) *EnrollNode { r := &EnrollNode{ transport: tp, diff --git a/typedapi/security/enrollnode/response.go b/typedapi/security/enrollnode/response.go index 4c2961c53f..a4c6d1a553 100644 --- a/typedapi/security/enrollnode/response.go +++ b/typedapi/security/enrollnode/response.go @@ -16,20 +16,36 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package enrollnode // Response holds the response body struct for the package enrollnode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/enroll_node/Response.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/enroll_node/Response.ts#L20-L47 type Response struct { - HttpCaCert string `json:"http_ca_cert"` - HttpCaKey string `json:"http_ca_key"` - NodesAddresses []string `json:"nodes_addresses"` - TransportCaCert string `json:"transport_ca_cert"` - TransportCert string `json:"transport_cert"` - TransportKey string `json:"transport_key"` + + // HttpCaCert The CA certificate that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. + HttpCaCert string `json:"http_ca_cert"` + // HttpCaKey The CA private key that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the key. + HttpCaKey string `json:"http_ca_key"` + // NodesAddresses A list of transport addresses in the form of `host:port` for the nodes that + // are already members of the cluster. + NodesAddresses []string `json:"nodes_addresses"` + // TransportCaCert The CA certificate that is used to sign the TLS certificate for the transport + // layer, as a Base64 encoded string of the ASN.1 DER encoding of the + // certificate. 
+ TransportCaCert string `json:"transport_ca_cert"` + // TransportCert The certificate that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the certificate. + TransportCert string `json:"transport_cert"` + // TransportKey The private key that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the key. + TransportKey string `json:"transport_key"` } // NewResponse returns a Response diff --git a/typedapi/security/getapikey/get_api_key.go b/typedapi/security/getapikey/get_api_key.go index 33f9f6daf8..a71cad37c7 100644 --- a/typedapi/security/getapikey/get_api_key.go +++ b/typedapi/security/getapikey/get_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get API key information. // @@ -84,7 +84,7 @@ func NewGetApiKeyFunc(tp elastictransport.Interface) NewGetApiKey { // (including `manage_security`), this API returns all API keys regardless of // ownership. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key func New(tp elastictransport.Interface) *GetApiKey { r := &GetApiKey{ transport: tp, diff --git a/typedapi/security/getapikey/response.go b/typedapi/security/getapikey/response.go index 5ebc41fe32..500e7dbf25 100644 --- a/typedapi/security/getapikey/response.go +++ b/typedapi/security/getapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getapikey @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 type Response struct { ApiKeys []types.ApiKey `json:"api_keys"` } diff --git a/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go b/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go index bcb92afab9..75d0f701cc 100644 --- a/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go +++ b/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get builtin privileges. // @@ -76,7 +76,7 @@ func NewGetBuiltinPrivilegesFunc(tp elastictransport.Interface) NewGetBuiltinPri // Get the list of cluster privileges and index privileges that are available in // this version of Elasticsearch. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges func New(tp elastictransport.Interface) *GetBuiltinPrivileges { r := &GetBuiltinPrivileges{ transport: tp, diff --git a/typedapi/security/getbuiltinprivileges/response.go b/typedapi/security/getbuiltinprivileges/response.go index 1785790722..631310b5b0 100644 --- a/typedapi/security/getbuiltinprivileges/response.go +++ b/typedapi/security/getbuiltinprivileges/response.go @@ -16,16 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getbuiltinprivileges +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/remoteclusterprivilege" +) + // Response holds the response body struct for the package getbuiltinprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L26-L42 type Response struct { - Cluster []string `json:"cluster"` - Index []string `json:"index"` + + // Cluster The list of cluster privileges that are understood by this version of + // Elasticsearch. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` + // Index The list of index privileges that are understood by this version of + // Elasticsearch. 
+ Index []string `json:"index"` + // RemoteCluster The list of remote_cluster privileges that are understood by this version of + // Elasticsearch. + RemoteCluster []remoteclusterprivilege.RemoteClusterPrivilege `json:"remote_cluster"` } // NewResponse returns a Response diff --git a/typedapi/security/getprivileges/get_privileges.go b/typedapi/security/getprivileges/get_privileges.go index 64c20d3c37..dd74390cd8 100644 --- a/typedapi/security/getprivileges/get_privileges.go +++ b/typedapi/security/getprivileges/get_privileges.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. package getprivileges import ( @@ -79,7 +86,14 @@ func NewGetPrivilegesFunc(tp elastictransport.Interface) NewGetPrivileges { // Get application privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges func New(tp elastictransport.Interface) *GetPrivileges { r := &GetPrivileges{ transport: tp, @@ -317,7 +331,10 @@ func (r *GetPrivileges) Header(key, value string) *GetPrivileges { return r } -// Application Application name +// Application The name of the application. +// Application privileges are always associated with exactly one application. +// If you do not specify this parameter, the API returns information about all +// privileges for all applications. // API Name: application func (r *GetPrivileges) Application(application string) *GetPrivileges { r.paramSet |= applicationMask @@ -326,7 +343,9 @@ func (r *GetPrivileges) Application(application string) *GetPrivileges { return r } -// Name Privilege name +// Name The name of the privilege. +// If you do not specify this parameter, the API returns information about all +// privileges for the requested application. // API Name: name func (r *GetPrivileges) Name(name string) *GetPrivileges { r.paramSet |= nameMask diff --git a/typedapi/security/getprivileges/response.go b/typedapi/security/getprivileges/response.go index c7dcf67823..42979f823f 100644 --- a/typedapi/security/getprivileges/response.go +++ b/typedapi/security/getprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L29 type Response map[string]map[string]types.PrivilegesActions diff --git a/typedapi/security/getrole/get_role.go b/typedapi/security/getrole/get_role.go index ccac3b3382..89e053d549 100644 --- a/typedapi/security/getrole/get_role.go +++ b/typedapi/security/getrole/get_role.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get roles. // // Get roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The get roles API cannot retrieve roles that are defined in roles files. package getrole import ( @@ -79,8 +82,11 @@ func NewGetRoleFunc(tp elastictransport.Interface) NewGetRole { // Get roles. // // Get roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The get roles API cannot retrieve roles that are defined in roles files. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role func New(tp elastictransport.Interface) *GetRole { r := &GetRole{ transport: tp, @@ -299,9 +305,10 @@ func (r *GetRole) Header(key, value string) *GetRole { return r } -// Name The name of the role. You can specify multiple roles as a comma-separated -// list. If you do not specify this parameter, the API returns information about -// all roles. +// Name The name of the role. +// You can specify multiple roles as a comma-separated list. +// If you do not specify this parameter, the API returns information about all +// roles. // API Name: name func (r *GetRole) Name(name string) *GetRole { r.paramSet |= nameMask diff --git a/typedapi/security/getrole/response.go b/typedapi/security/getrole/response.go index 8690a9adaf..4d65640edf 100644 --- a/typedapi/security/getrole/response.go +++ b/typedapi/security/getrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L31 type Response map[string]types.Role diff --git a/typedapi/security/getrolemapping/get_role_mapping.go b/typedapi/security/getrolemapping/get_role_mapping.go index 1f0f5ceec2..b0210e4034 100644 --- a/typedapi/security/getrolemapping/get_role_mapping.go +++ b/typedapi/security/getrolemapping/get_role_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get role mappings. // @@ -88,7 +88,7 @@ func NewGetRoleMappingFunc(tp elastictransport.Interface) NewGetRoleMapping { // The get role mappings API cannot retrieve role mappings that are defined in // role mapping files. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping func New(tp elastictransport.Interface) *GetRoleMapping { r := &GetRoleMapping{ transport: tp, diff --git a/typedapi/security/getrolemapping/response.go b/typedapi/security/getrolemapping/response.go index 434dc45c01..d76fe6dfed 100644 --- a/typedapi/security/getrolemapping/response.go +++ b/typedapi/security/getrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L29 type Response map[string]types.SecurityRoleMapping diff --git a/typedapi/security/getserviceaccounts/get_service_accounts.go b/typedapi/security/getserviceaccounts/get_service_accounts.go index 37b7c57cf6..58834899e2 100644 --- a/typedapi/security/getserviceaccounts/get_service_accounts.go +++ b/typedapi/security/getserviceaccounts/get_service_accounts.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get service accounts. // // Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. package getserviceaccounts import ( @@ -83,7 +86,10 @@ func NewGetServiceAccountsFunc(tp elastictransport.Interface) NewGetServiceAccou // // Get a list of service accounts that match the provided path parameters. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts func New(tp elastictransport.Interface) *GetServiceAccounts { r := &GetServiceAccounts{ transport: tp, @@ -321,9 +327,9 @@ func (r *GetServiceAccounts) Header(key, value string) *GetServiceAccounts { return r } -// Namespace Name of the namespace. Omit this parameter to retrieve information about all -// service accounts. If you omit this parameter, you must also omit the -// `service` parameter. +// Namespace The name of the namespace. +// Omit this parameter to retrieve information about all service accounts. +// If you omit this parameter, you must also omit the `service` parameter. // API Name: namespace func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { r.paramSet |= namespaceMask @@ -332,8 +338,9 @@ func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { return r } -// Service Name of the service name. Omit this parameter to retrieve information about -// all service accounts that belong to the specified `namespace`. +// Service The service name. 
+// Omit this parameter to retrieve information about all service accounts that +// belong to the specified `namespace`. // API Name: service func (r *GetServiceAccounts) Service(service string) *GetServiceAccounts { r.paramSet |= serviceMask diff --git a/typedapi/security/getserviceaccounts/response.go b/typedapi/security/getserviceaccounts/response.go index 3bd9f22003..c35daccd76 100644 --- a/typedapi/security/getserviceaccounts/response.go +++ b/typedapi/security/getserviceaccounts/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getserviceaccounts @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getserviceaccounts // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L29 type Response map[string]types.RoleDescriptorWrapper diff --git a/typedapi/security/getservicecredentials/get_service_credentials.go b/typedapi/security/getservicecredentials/get_service_credentials.go index 52f829472a..b968c48b65 100644 --- a/typedapi/security/getservicecredentials/get_service_credentials.go +++ b/typedapi/security/getservicecredentials/get_service_credentials.go @@ -16,9 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get service account credentials. +// +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. package getservicecredentials import ( @@ -83,7 +96,20 @@ func NewGetServiceCredentialsFunc(tp elastictransport.Interface) NewGetServiceCr // Get service account credentials. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials func New(tp elastictransport.Interface) *GetServiceCredentials { r := &GetServiceCredentials{ transport: tp, @@ -303,7 +329,7 @@ func (r *GetServiceCredentials) Header(key, value string) *GetServiceCredentials return r } -// Namespace Name of the namespace. +// Namespace The name of the namespace. // API Name: namespace func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredentials { r.paramSet |= namespaceMask @@ -312,7 +338,7 @@ func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredenti return r } -// Service Name of the service name. +// Service The service name. // API Name: service func (r *GetServiceCredentials) _service(service string) *GetServiceCredentials { r.paramSet |= serviceMask diff --git a/typedapi/security/getservicecredentials/response.go b/typedapi/security/getservicecredentials/response.go index 2012e036a3..24c7b88171 100644 --- a/typedapi/security/getservicecredentials/response.go +++ b/typedapi/security/getservicecredentials/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getservicecredentials @@ -26,10 +26,10 @@ import ( // Response holds the response body struct for the package getservicecredentials // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L34 type Response struct { Count int `json:"count"` - // NodesCredentials Contains service account credentials collected from all nodes of the cluster + // NodesCredentials Service account credentials collected from all nodes of the cluster. NodesCredentials types.NodesCredentials `json:"nodes_credentials"` ServiceAccount string `json:"service_account"` Tokens map[string]types.Metadata `json:"tokens"` diff --git a/typedapi/security/getsettings/get_settings.go b/typedapi/security/getsettings/get_settings.go index 0d9829c461..89cd280072 100644 --- a/typedapi/security/getsettings/get_settings.go +++ b/typedapi/security/getsettings/get_settings.go @@ -16,21 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieve settings for the security system indices +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). 
+// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` package getsettings import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,9 +77,18 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Retrieve settings for the security system indices +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). +// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings func New(tp elastictransport.Interface) *GetSettings { r := &GetSettings{ transport: tp, @@ -180,8 +201,57 @@ func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error } // Do runs the request through the transport, handle the response and returns a getsettings.Response -func (r GetSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil 
{ + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -229,3 +299,57 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/security/getsettings/response.go b/typedapi/security/getsettings/response.go new file mode 100644 index 0000000000..08dab5f044 --- /dev/null +++ b/typedapi/security/getsettings/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_settings/SecurityGetSettingsResponse.ts#L21-L36 +type Response struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security types.SecuritySettings `json:"security"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile types.SecuritySettings `json:"security-profile"` + // SecurityTokens Settings for the index used to store tokens. + SecurityTokens types.SecuritySettings `json:"security-tokens"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/gettoken/get_token.go b/typedapi/security/gettoken/get_token.go index 06c90b0a4a..ae5bbf42d4 100644 --- a/typedapi/security/gettoken/get_token.go +++ b/typedapi/security/gettoken/get_token.go @@ -16,11 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a token. // // Create a bearer token for access without requiring basic authentication. 
+// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. +// +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. +// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. package gettoken import ( @@ -79,8 +99,28 @@ func NewGetTokenFunc(tp elastictransport.Interface) NewGetToken { // Get a token. // // Create a bearer token for access without requiring basic authentication. +// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. +// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token func New(tp elastictransport.Interface) *GetToken { r := &GetToken{ transport: tp, @@ -88,8 +128,6 @@ func New(tp elastictransport.Interface) *GetToken { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -343,46 +381,90 @@ func (r *GetToken) Pretty(pretty bool) *GetToken { return r } +// The type of grant. +// Supported grant types are: `password`, `_kerberos`, `client_credentials`, and +// `refresh_token`. // API name: grant_type func (r *GetToken) GrantType(granttype accesstokengranttype.AccessTokenGrantType) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = &granttype - return r } +// The base64 encoded kerberos ticket. +// If you specify the `_kerberos` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: kerberos_ticket func (r *GetToken) KerberosTicket(kerberosticket string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.KerberosTicket = &kerberosticket return r } +// The user's password. 
+// If you specify the `password` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: password func (r *GetToken) Password(password string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } +// The string that was returned when you created the token, which enables you to +// extend its life. +// If you specify the `refresh_token` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: refresh_token func (r *GetToken) RefreshToken(refreshtoken string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } +// The scope of the token. +// Currently tokens are only issued for a scope of FULL regardless of the value +// sent with the request. // API name: scope func (r *GetToken) Scope(scope string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Scope = &scope return r } +// The username that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: username func (r *GetToken) Username(username string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/gettoken/request.go b/typedapi/security/gettoken/request.go index 741d628006..04475d584c 100644 --- a/typedapi/security/gettoken/request.go +++ b/typedapi/security/gettoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettoken @@ -33,14 +33,34 @@ import ( // Request holds the request body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L90 type Request struct { - GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` - KerberosTicket *string `json:"kerberos_ticket,omitempty"` - Password *string `json:"password,omitempty"` - RefreshToken *string `json:"refresh_token,omitempty"` - Scope *string `json:"scope,omitempty"` - Username *string `json:"username,omitempty"` + + // GrantType The type of grant. + // Supported grant types are: `password`, `_kerberos`, `client_credentials`, and + // `refresh_token`. + GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` + // KerberosTicket The base64 encoded kerberos ticket. + // If you specify the `_kerberos` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + KerberosTicket *string `json:"kerberos_ticket,omitempty"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + Password *string `json:"password,omitempty"` + // RefreshToken The string that was returned when you created the token, which enables you to + // extend its life. + // If you specify the `refresh_token` grant type, this parameter is required. 
+ // This parameter is not valid with any other supported grant type. + RefreshToken *string `json:"refresh_token,omitempty"` + // Scope The scope of the token. + // Currently tokens are only issued for a scope of FULL regardless of the value + // sent with the request. + Scope *string `json:"scope,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/gettoken/response.go b/typedapi/security/gettoken/response.go index b154bfdd0f..2b3ae91b87 100644 --- a/typedapi/security/gettoken/response.go +++ b/typedapi/security/gettoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 type Response struct { AccessToken string `json:"access_token"` Authentication types.AuthenticatedUser `json:"authentication"` diff --git a/typedapi/security/getuser/get_user.go b/typedapi/security/getuser/get_user.go index cbd77ef937..0571d5cc22 100644 --- a/typedapi/security/getuser/get_user.go +++ b/typedapi/security/getuser/get_user.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get users. // @@ -80,7 +80,7 @@ func NewGetUserFunc(tp elastictransport.Interface) NewGetUser { // // Get information about users in the native realm and built-in users. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user func New(tp elastictransport.Interface) *GetUser { r := &GetUser{ transport: tp, @@ -310,7 +310,8 @@ func (r *GetUser) Username(usernames ...string) *GetUser { return r } -// WithProfileUid If true will return the User Profile ID for a user, if any. +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. // API name: with_profile_uid func (r *GetUser) WithProfileUid(withprofileuid bool) *GetUser { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) diff --git a/typedapi/security/getuser/response.go b/typedapi/security/getuser/response.go index 32472132cf..261bd8759e 100644 --- a/typedapi/security/getuser/response.go +++ b/typedapi/security/getuser/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getuser @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_user/SecurityGetUserResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_user/SecurityGetUserResponse.ts#L23-L30 type Response map[string]types.User diff --git a/typedapi/security/getuserprivileges/get_user_privileges.go b/typedapi/security/getuserprivileges/get_user_privileges.go index c9b4591744..a4e0929172 100644 --- a/typedapi/security/getuserprivileges/get_user_privileges.go +++ b/typedapi/security/getuserprivileges/get_user_privileges.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get user privileges. +// +// Get the security privileges for the logged in user. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. package getuserprivileges import ( @@ -70,7 +76,13 @@ func NewGetUserPrivilegesFunc(tp elastictransport.Interface) NewGetUserPrivilege // Get user privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html +// Get the security privileges for the logged in user. 
+// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges func New(tp elastictransport.Interface) *GetUserPrivileges { r := &GetUserPrivileges{ transport: tp, diff --git a/typedapi/security/getuserprivileges/response.go b/typedapi/security/getuserprivileges/response.go index 6c7fe39f11..ccd511d881 100644 --- a/typedapi/security/getuserprivileges/response.go +++ b/typedapi/security/getuserprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getuserprivileges @@ -26,13 +26,15 @@ import ( // Response holds the response body struct for the package getuserprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L28-L38 type Response struct { - Applications []types.ApplicationPrivileges `json:"applications"` - Cluster []string `json:"cluster"` - Global []types.GlobalPrivilege `json:"global"` - Indices []types.UserIndicesPrivileges `json:"indices"` - RunAs []string `json:"run_as"` + Applications []types.ApplicationPrivileges `json:"applications"` + Cluster []string `json:"cluster"` + Global []types.GlobalPrivilege `json:"global"` + Indices 
[]types.UserIndicesPrivileges `json:"indices"` + RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + RemoteIndices []types.RemoteUserIndicesPrivileges `json:"remote_indices,omitempty"` + RunAs []string `json:"run_as"` } // NewResponse returns a Response diff --git a/typedapi/security/getuserprofile/get_user_profile.go b/typedapi/security/getuserprofile/get_user_profile.go index 9bba9f922a..53254bc83f 100644 --- a/typedapi/security/getuserprofile/get_user_profile.go +++ b/typedapi/security/getuserprofile/get_user_profile.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get a user profile. // // Get a user's profile using the unique profile ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package getuserprofile import ( @@ -82,7 +88,13 @@ func NewGetUserProfileFunc(tp elastictransport.Interface) NewGetUserProfile { // // Get a user's profile using the unique profile ID. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile func New(tp elastictransport.Interface) *GetUserProfile { r := &GetUserProfile{ transport: tp, @@ -303,9 +315,11 @@ func (r *GetUserProfile) _uid(uids ...string) *GetUserProfile { return r } -// Data List of filters for the `data` field of the profile document. -// To return all content use `data=*`. To return a subset of content -// use `data=` to retrieve content nested under the specified ``. +// Data A comma-separated list of filters for the `data` field of the profile +// document. +// To return all content use `data=*`. +// To return a subset of content use `data=` to retrieve content nested +// under the specified ``. // By default returns no `data` content. // API name: data func (r *GetUserProfile) Data(data ...string) *GetUserProfile { diff --git a/typedapi/security/getuserprofile/response.go b/typedapi/security/getuserprofile/response.go index c6ac75315d..95b2d21421 100644 --- a/typedapi/security/getuserprofile/response.go +++ b/typedapi/security/getuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getuserprofile @@ -26,9 +26,15 @@ import ( // Response holds the response body struct for the package getuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_user_profile/Response.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_user_profile/Response.ts#L23-L33 type Response struct { - Errors *types.GetUserProfileErrors `json:"errors,omitempty"` + Errors *types.GetUserProfileErrors `json:"errors,omitempty"` + // Profiles A successful call returns the JSON representation of the user profile and its + // internal versioning numbers. + // The API returns an empty object if no profile document is found for the + // provided `uid`. + // The content of the data field is not returned by default to avoid + // deserializing a potential large payload. Profiles []types.UserProfileWithMetadata `json:"profiles"` } diff --git a/typedapi/security/grantapikey/grant_api_key.go b/typedapi/security/grantapikey/grant_api_key.go index 0aebb43086..a86e4b8184 100644 --- a/typedapi/security/grantapikey/grant_api_key.go +++ b/typedapi/security/grantapikey/grant_api_key.go @@ -16,18 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Grant an API key. // // Create an API key on behalf of another user. 
// This API is similar to the create API keys API, however it creates the API // key for a user that is different than the user that runs the API. -// The caller must have authentication credentials (either an access token, or a -// username and password) for the user on whose behalf the API key will be -// created. -// It is not possible to use this API to create an API key without that user’s +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's // credentials. +// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. // In this case, the API key will be created on behalf of the impersonated user. @@ -35,6 +40,8 @@ // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -103,11 +110,16 @@ func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { // Create an API key on behalf of another user. // This API is similar to the create API keys API, however it creates the API // key for a user that is different than the user that runs the API. -// The caller must have authentication credentials (either an access token, or a -// username and password) for the user on whose behalf the API key will be -// created. 
-// It is not possible to use this API to create an API key without that user’s +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's // credentials. +// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. // In this case, the API key will be created on behalf of the impersonated user. @@ -115,6 +127,8 @@ func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -124,7 +138,7 @@ func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { // By default, API keys never expire. You can specify expiration information // when you create the API keys. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key func New(tp elastictransport.Interface) *GrantApiKey { r := &GrantApiKey{ transport: tp, @@ -132,8 +146,6 @@ func New(tp elastictransport.Interface) *GrantApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,57 +399,83 @@ func (r *GrantApiKey) Pretty(pretty bool) *GrantApiKey { return r } -// AccessToken The user’s access token. +// The user's access token. // If you specify the `access_token` grant type, this parameter is required. // It is not valid with other grant types. // API name: access_token func (r *GrantApiKey) AccessToken(accesstoken string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AccessToken = &accesstoken return r } -// ApiKey Defines the API key. +// The API key. // API name: api_key -func (r *GrantApiKey) ApiKey(apikey *types.GrantApiKey) *GrantApiKey { +func (r *GrantApiKey) ApiKey(apikey types.GrantApiKeyVariant) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ApiKey = *apikey + r.req.ApiKey = *apikey.GrantApiKeyCaster() return r } -// GrantType The type of grant. Supported grant types are: `access_token`, `password`. +// The type of grant. Supported grant types are: `access_token`, `password`. // API name: grant_type func (r *GrantApiKey) GrantType(granttype apikeygranttype.ApiKeyGrantType) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = granttype - return r } -// Password The user’s password. If you specify the `password` grant type, this parameter -// is required. 
+// The user's password. +// If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. // API name: password func (r *GrantApiKey) Password(password string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } -// RunAs The name of the user to be impersonated. +// The name of the user to be impersonated. // API name: run_as func (r *GrantApiKey) RunAs(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RunAs = &username return r } -// Username The user name that identifies the user. +// The user name that identifies the user. // If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. // API name: username func (r *GrantApiKey) Username(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/grantapikey/request.go b/typedapi/security/grantapikey/request.go index 7e4e001bdf..67cd377d58 100644 --- a/typedapi/security/grantapikey/request.go +++ b/typedapi/security/grantapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package grantapikey @@ -34,19 +34,19 @@ import ( // Request holds the request body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L92 type Request struct { - // AccessToken The user’s access token. + // AccessToken The user's access token. // If you specify the `access_token` grant type, this parameter is required. // It is not valid with other grant types. AccessToken *string `json:"access_token,omitempty"` - // ApiKey Defines the API key. + // ApiKey The API key. ApiKey types.GrantApiKey `json:"api_key"` // GrantType The type of grant. Supported grant types are: `access_token`, `password`. GrantType apikeygranttype.ApiKeyGrantType `json:"grant_type"` - // Password The user’s password. If you specify the `password` grant type, this parameter - // is required. + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. Password *string `json:"password,omitempty"` // RunAs The name of the user to be impersonated. diff --git a/typedapi/security/grantapikey/response.go b/typedapi/security/grantapikey/response.go index 5b35843241..901d389cc3 100644 --- a/typedapi/security/grantapikey/response.go +++ b/typedapi/security/grantapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package grantapikey // Response holds the response body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 type Response struct { ApiKey string `json:"api_key"` Encoded string `json:"encoded"` diff --git a/typedapi/security/hasprivileges/has_privileges.go b/typedapi/security/hasprivileges/has_privileges.go index 9280a5c7bb..2e73a98925 100644 --- a/typedapi/security/hasprivileges/has_privileges.go +++ b/typedapi/security/hasprivileges/has_privileges.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check user privileges. // // Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. package hasprivileges import ( @@ -85,8 +87,10 @@ func NewHasPrivilegesFunc(tp elastictransport.Interface) NewHasPrivileges { // Check user privileges. // // Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges func New(tp elastictransport.Interface) *HasPrivileges { r := &HasPrivileges{ transport: tp, @@ -94,8 +98,6 @@ func New(tp elastictransport.Interface) *HasPrivileges { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,23 +376,44 @@ func (r *HasPrivileges) Pretty(pretty bool) *HasPrivileges { } // API name: application -func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheck) *HasPrivileges { - r.req.Application = applications +func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + + r.req.Application = append(r.req.Application, *v.ApplicationPrivilegesCheckCaster()) + } return r } -// Cluster A list of the cluster privileges that you want to check. +// A list of the cluster privileges that you want to check. 
// API name: cluster func (r *HasPrivileges) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *HasPrivileges { - r.req.Cluster = clusters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + r.req.Cluster = append(r.req.Cluster, v) + + } return r } // API name: index -func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheck) *HasPrivileges { - r.req.Index = indices +func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + r.req.Index = append(r.req.Index, *v.IndexPrivilegesCheckCaster()) + + } return r } diff --git a/typedapi/security/hasprivileges/request.go b/typedapi/security/hasprivileges/request.go index 7d9f42c511..734b682e55 100644 --- a/typedapi/security/hasprivileges/request.go +++ b/typedapi/security/hasprivileges/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package hasprivileges @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L59 type Request struct { Application []types.ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. diff --git a/typedapi/security/hasprivileges/response.go b/typedapi/security/hasprivileges/response.go index d8c8a570b5..ee549cfc95 100644 --- a/typedapi/security/hasprivileges/response.go +++ b/typedapi/security/hasprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package hasprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L35 type Response struct { Application types.ApplicationsPrivileges `json:"application"` Cluster map[string]bool `json:"cluster"` diff --git a/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go b/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go index eed7dc9732..8932c67be0 100644 --- a/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go +++ b/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go @@ -16,12 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Check user profile privileges. // // Determine whether the users associated with the specified user profile IDs // have all the requested privileges. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. 
+// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package hasprivilegesuserprofile import ( @@ -81,7 +87,13 @@ func NewHasPrivilegesUserProfileFunc(tp elastictransport.Interface) NewHasPrivil // Determine whether the users associated with the specified user profile IDs // have all the requested privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { r := &HasPrivilegesUserProfile{ transport: tp, @@ -89,8 +101,6 @@ func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,19 +354,31 @@ func (r *HasPrivilegesUserProfile) Pretty(pretty bool) *HasPrivilegesUserProfile return r } +// An object containing all the privileges to be checked. // API name: privileges -func (r *HasPrivilegesUserProfile) Privileges(privileges *types.PrivilegesCheck) *HasPrivilegesUserProfile { +func (r *HasPrivilegesUserProfile) Privileges(privileges types.PrivilegesCheckVariant) *HasPrivilegesUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Privileges = *privileges + r.req.Privileges = *privileges.PrivilegesCheckCaster() return r } -// Uids A list of profile IDs. 
The privileges are checked for associated users of the +// A list of profile IDs. The privileges are checked for associated users of the // profiles. // API name: uids func (r *HasPrivilegesUserProfile) Uids(uids ...string) *HasPrivilegesUserProfile { - r.req.Uids = uids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range uids { + + r.req.Uids = append(r.req.Uids, v) + } return r } diff --git a/typedapi/security/hasprivilegesuserprofile/request.go b/typedapi/security/hasprivilegesuserprofile/request.go index 98bc5c27f8..f765c0ade7 100644 --- a/typedapi/security/hasprivilegesuserprofile/request.go +++ b/typedapi/security/hasprivilegesuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package hasprivilegesuserprofile @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges_user_profile/Request.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges_user_profile/Request.ts#L24-L55 type Request struct { + + // Privileges An object containing all the privileges to be checked. Privileges types.PrivilegesCheck `json:"privileges"` // Uids A list of profile IDs. The privileges are checked for associated users of the // profiles. 
diff --git a/typedapi/security/hasprivilegesuserprofile/response.go b/typedapi/security/hasprivilegesuserprofile/response.go index a4eb9b6155..0f6e22455a 100644 --- a/typedapi/security/hasprivilegesuserprofile/response.go +++ b/typedapi/security/hasprivilegesuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package hasprivilegesuserprofile @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges_user_profile/Response.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges_user_profile/Response.ts#L23-L38 type Response struct { // Errors The subset of the requested profile IDs for which an error diff --git a/typedapi/security/invalidateapikey/invalidate_api_key.go b/typedapi/security/invalidateapikey/invalidate_api_key.go index 4962361972..72f8d10977 100644 --- a/typedapi/security/invalidateapikey/invalidate_api_key.go +++ b/typedapi/security/invalidateapikey/invalidate_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Invalidate API keys. 
// @@ -25,13 +25,20 @@ // Invalidated API keys fail authentication, but they can still be viewed using // the get API key information and query API key information APIs, for at least // the configured retention period, until they are automatically deleted. -// The `manage_api_key` privilege allows deleting any API keys. -// The `manage_own_api_key` only allows deleting API keys that are owned by the -// user. +// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. +// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. +// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: +// // - Set the parameter `owner=true`. -// - Or, set both `username` and `realm_name` to match the user’s identity. +// - Or, set both `username` and `realm_name` to match the user's identity. // - Or, if the request is issued by an API key, that is to say an API key // invalidates itself, specify its ID in the `ids` field. package invalidateapikey @@ -95,17 +102,24 @@ func NewInvalidateApiKeyFunc(tp elastictransport.Interface) NewInvalidateApiKey // Invalidated API keys fail authentication, but they can still be viewed using // the get API key information and query API key information APIs, for at least // the configured retention period, until they are automatically deleted. -// The `manage_api_key` privilege allows deleting any API keys. -// The `manage_own_api_key` only allows deleting API keys that are owned by the -// user. +// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. 
+// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. +// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: +// // - Set the parameter `owner=true`. -// - Or, set both `username` and `realm_name` to match the user’s identity. +// - Or, set both `username` and `realm_name` to match the user's identity. // - Or, if the request is issued by an API key, that is to say an API key // invalidates itself, specify its ID in the `ids` field. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key func New(tp elastictransport.Interface) *InvalidateApiKey { r := &InvalidateApiKey{ transport: tp, @@ -113,8 +127,6 @@ func New(tp elastictransport.Interface) *InvalidateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -368,57 +380,91 @@ func (r *InvalidateApiKey) Pretty(pretty bool) *InvalidateApiKey { // API name: id func (r *InvalidateApiKey) Id(id string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = &id return r } -// Ids A list of API key ids. +// A list of API key ids. // This parameter cannot be used with any of `name`, `realm_name`, or // `username`. 
// API name: ids func (r *InvalidateApiKey) Ids(ids ...string) *InvalidateApiKey { - r.req.Ids = ids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + r.req.Ids = append(r.req.Ids, v) + + } return r } -// Name An API key name. +// An API key name. // This parameter cannot be used with any of `ids`, `realm_name` or `username`. // API name: name func (r *InvalidateApiKey) Name(name string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// Owner Can be used to query API keys owned by the currently authenticated user. +// Query API keys owned by the currently authenticated user. // The `realm_name` or `username` parameters cannot be specified when this // parameter is set to `true` as they are assumed to be the currently // authenticated ones. +// +// NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be +// specified if `owner` is `false`. // API name: owner func (r *InvalidateApiKey) Owner(owner bool) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Owner = &owner return r } -// RealmName The name of an authentication realm. +// The name of an authentication realm. // This parameter cannot be used with either `ids` or `name`, or when `owner` // flag is set to `true`. // API name: realm_name func (r *InvalidateApiKey) RealmName(realmname string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RealmName = &realmname return r } -// Username The username of a user. -// This parameter cannot be used with either `ids` or `name`, or when `owner` +// The username of a user. +// This parameter cannot be used with either `ids` or `name` or when `owner` // flag is set to `true`. 
// API name: username func (r *InvalidateApiKey) Username(username string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/invalidateapikey/request.go b/typedapi/security/invalidateapikey/request.go index ff492ebf7c..5e465710d1 100644 --- a/typedapi/security/invalidateapikey/request.go +++ b/typedapi/security/invalidateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package invalidateapikey @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L82 type Request struct { Id *string `json:"id,omitempty"` // Ids A list of API key ids. @@ -41,17 +41,20 @@ type Request struct { // Name An API key name. // This parameter cannot be used with any of `ids`, `realm_name` or `username`. Name *string `json:"name,omitempty"` - // Owner Can be used to query API keys owned by the currently authenticated user. + // Owner Query API keys owned by the currently authenticated user. // The `realm_name` or `username` parameters cannot be specified when this // parameter is set to `true` as they are assumed to be the currently // authenticated ones. 
+ // + // NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be + // specified if `owner` is `false`. Owner *bool `json:"owner,omitempty"` // RealmName The name of an authentication realm. // This parameter cannot be used with either `ids` or `name`, or when `owner` // flag is set to `true`. RealmName *string `json:"realm_name,omitempty"` // Username The username of a user. - // This parameter cannot be used with either `ids` or `name`, or when `owner` + // This parameter cannot be used with either `ids` or `name` or when `owner` // flag is set to `true`. Username *string `json:"username,omitempty"` } diff --git a/typedapi/security/invalidateapikey/response.go b/typedapi/security/invalidateapikey/response.go index 3438c0833a..2675355545 100644 --- a/typedapi/security/invalidateapikey/response.go +++ b/typedapi/security/invalidateapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package invalidateapikey @@ -26,12 +26,18 @@ import ( // Response holds the response body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L46 type Response struct { - ErrorCount int `json:"error_count"` - ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` - InvalidatedApiKeys []string `json:"invalidated_api_keys"` - PreviouslyInvalidatedApiKeys []string `json:"previously_invalidated_api_keys"` + + // ErrorCount 
The number of errors that were encountered when invalidating the API keys. + ErrorCount int `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. + ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedApiKeys The IDs of the API keys that were invalidated as part of this request. + InvalidatedApiKeys []string `json:"invalidated_api_keys"` + // PreviouslyInvalidatedApiKeys The IDs of the API keys that were already invalidated. + PreviouslyInvalidatedApiKeys []string `json:"previously_invalidated_api_keys"` } // NewResponse returns a Response diff --git a/typedapi/security/invalidatetoken/invalidate_token.go b/typedapi/security/invalidatetoken/invalidate_token.go index 21cde6b9dd..eeea571046 100644 --- a/typedapi/security/invalidatetoken/invalidate_token.go +++ b/typedapi/security/invalidatetoken/invalidate_token.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Invalidate a token. // @@ -30,6 +30,12 @@ // They can also be used exactly once. // If you want to invalidate one or more access or refresh tokens immediately, // use this invalidate token API. +// +// NOTE: While all parameters are optional, at least one of them is required. +// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. package invalidatetoken import ( @@ -97,7 +103,13 @@ func NewInvalidateTokenFunc(tp elastictransport.Interface) NewInvalidateToken { // If you want to invalidate one or more access or refresh tokens immediately, // use this invalidate token API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html +// NOTE: While all parameters are optional, at least one of them is required. +// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token func New(tp elastictransport.Interface) *InvalidateToken { r := &InvalidateToken{ transport: tp, @@ -105,8 +117,6 @@ func New(tp elastictransport.Interface) *InvalidateToken { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,31 +370,59 @@ func (r *InvalidateToken) Pretty(pretty bool) *InvalidateToken { return r } +// The name of an authentication realm. +// This parameter cannot be used with either `refresh_token` or `token`. // API name: realm_name func (r *InvalidateToken) RealmName(name string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RealmName = &name return r } +// A refresh token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. // API name: refresh_token func (r *InvalidateToken) RefreshToken(refreshtoken string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } +// An access token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. 
// API name: token func (r *InvalidateToken) Token(token string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Token = &token return r } +// The username of a user. +// This parameter cannot be used with either `refresh_token` or `token`. // API name: username func (r *InvalidateToken) Username(username string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/invalidatetoken/request.go b/typedapi/security/invalidatetoken/request.go index 5e98f58bb8..1337d4b52e 100644 --- a/typedapi/security/invalidatetoken/request.go +++ b/typedapi/security/invalidatetoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package invalidatetoken @@ -31,12 +31,23 @@ import ( // Request holds the request body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L71 type Request struct { - RealmName *string `json:"realm_name,omitempty"` + + // RealmName The name of an authentication realm. + // This parameter cannot be used with either `refresh_token` or `token`. + RealmName *string `json:"realm_name,omitempty"` + // RefreshToken A refresh token. 
+ // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. RefreshToken *string `json:"refresh_token,omitempty"` - Token *string `json:"token,omitempty"` - Username *string `json:"username,omitempty"` + // Token An access token. + // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. + Token *string `json:"token,omitempty"` + // Username The username of a user. + // This parameter cannot be used with either `refresh_token` or `token`. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/invalidatetoken/response.go b/typedapi/security/invalidatetoken/response.go index 5acf7f51f8..bf484e9eb3 100644 --- a/typedapi/security/invalidatetoken/response.go +++ b/typedapi/security/invalidatetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package invalidatetoken @@ -26,12 +26,18 @@ import ( // Response holds the response body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L46 type Response struct { - ErrorCount int64 `json:"error_count"` - ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` - InvalidatedTokens int64 `json:"invalidated_tokens"` - PreviouslyInvalidatedTokens int64 `json:"previously_invalidated_tokens"` + + // ErrorCount The number of errors that were 
encountered when invalidating the tokens. + ErrorCount int64 `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. + ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedTokens The number of the tokens that were invalidated as part of this request. + InvalidatedTokens int64 `json:"invalidated_tokens"` + // PreviouslyInvalidatedTokens The number of tokens that were already invalidated. + PreviouslyInvalidatedTokens int64 `json:"previously_invalidated_tokens"` } // NewResponse returns a Response diff --git a/typedapi/security/oidcauthenticate/oidc_authenticate.go b/typedapi/security/oidcauthenticate/oidc_authenticate.go index 0362cd56f2..244917d673 100644 --- a/typedapi/security/oidcauthenticate/oidc_authenticate.go +++ b/typedapi/security/oidcauthenticate/oidc_authenticate.go @@ -16,22 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Exchanges an OpenID Connection authentication response message for an -// Elasticsearch access token and refresh token pair +// Authenticate OpenID Connect. +// +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. 
package oidcauthenticate import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +59,10 @@ type OidcAuthenticate struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,15 +83,26 @@ func NewOidcAuthenticateFunc(tp elastictransport.Interface) NewOidcAuthenticate } } -// Exchanges an OpenID Connection authentication response message for an -// Elasticsearch access token and refresh token pair +// Authenticate OpenID Connect. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate func New(tp elastictransport.Interface) *OidcAuthenticate { r := &OidcAuthenticate{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -86,6 +114,21 @@ func New(tp elastictransport.Interface) *OidcAuthenticate { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *OidcAuthenticate) Raw(raw io.Reader) *OidcAuthenticate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcAuthenticate) Request(req *Request) *OidcAuthenticate { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *OidcAuthenticate) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +138,31 @@ func (r *OidcAuthenticate) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcAuthenticate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -184,13 +252,7 @@ func (r OidcAuthenticate) Perform(providedCtx context.Context) (*http.Response, } // Do runs the request through the transport, handle the response and returns a oidcauthenticate.Response -func (r OidcAuthenticate) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcAuthenticate) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcAuthenticate) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -201,30 +263,46 @@ func (r OidcAuthenticate) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcAuthenticate query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcAuthenticate headers map. 
@@ -233,3 +311,110 @@ func (r *OidcAuthenticate) Header(key, value string) *OidcAuthenticate { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcAuthenticate) ErrorTrace(errortrace bool) *OidcAuthenticate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcAuthenticate) FilterPath(filterpaths ...string) *OidcAuthenticate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcAuthenticate) Human(human bool) *OidcAuthenticate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcAuthenticate) Pretty(pretty bool) *OidcAuthenticate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Associate a client session with an ID token and mitigate replay attacks. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. 
+// API name: nonce +func (r *OidcAuthenticate) Nonce(nonce string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = nonce + + return r +} + +// The name of the OpenID Connect realm. +// This property is useful in cases where multiple realms are defined. +// API name: realm +func (r *OidcAuthenticate) Realm(realm string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The URL to which the OpenID Connect Provider redirected the User Agent in +// response to an authentication request after a successful authentication. +// This URL must be provided as-is (URL encoded), taken from the body of the +// response or as the value of a location header in the response from the OpenID +// Connect Provider. +// API name: redirect_uri +func (r *OidcAuthenticate) RedirectUri(redirecturi string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RedirectUri = redirecturi + + return r +} + +// Maintain state between the authentication request and the response. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. +// API name: state +func (r *OidcAuthenticate) State(state string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = state + + return r +} diff --git a/typedapi/security/oidcauthenticate/request.go b/typedapi/security/oidcauthenticate/request.go new file mode 100644 index 0000000000..a6962b6047 --- /dev/null +++ b/typedapi/security/oidcauthenticate/request.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidcauthenticate + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_authenticate/Request.ts#L22-L61 +type Request struct { + + // Nonce Associate a client session with an ID token and mitigate replay attacks. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + Nonce string `json:"nonce"` + // Realm The name of the OpenID Connect realm. + // This property is useful in cases where multiple realms are defined. + Realm *string `json:"realm,omitempty"` + // RedirectUri The URL to which the OpenID Connect Provider redirected the User Agent in + // response to an authentication request after a successful authentication. 
+ // This URL must be provided as-is (URL encoded), taken from the body of the + // response or as the value of a location header in the response from the OpenID + // Connect Provider. + RedirectUri string `json:"redirect_uri"` + // State Maintain state between the authentication request and the response. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + State string `json:"state"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcauthenticate request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidcauthenticate/response.go b/typedapi/security/oidcauthenticate/response.go new file mode 100644 index 0000000000..f63621bfd0 --- /dev/null +++ b/typedapi/security/oidcauthenticate/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidcauthenticate + +// Response holds the response body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_authenticate/Response.ts#L22-L41 +type Response struct { + + // AccessToken The Elasticsearch access token. + AccessToken string `json:"access_token"` + // ExpiresIn The duration (in seconds) of the tokens. + ExpiresIn int `json:"expires_in"` + // RefreshToken The Elasticsearch refresh token. + RefreshToken string `json:"refresh_token"` + // Type The type of token. + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/oidclogout/oidc_logout.go b/typedapi/security/oidclogout/oidc_logout.go index d2cad85a41..101c154621 100644 --- a/typedapi/security/oidclogout/oidc_logout.go +++ b/typedapi/security/oidclogout/oidc_logout.go @@ -16,22 +16,39 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Invalidates a refresh token and access token that was generated from the -// OpenID Connect Authenticate API +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. 
+// +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. package oidclogout import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +63,10 @@ type OidcLogout struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,15 +87,30 @@ func NewOidcLogoutFunc(tp elastictransport.Interface) NewOidcLogout { } } -// Invalidates a refresh token and access token that was generated from the -// OpenID Connect Authenticate API +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. 
+// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout func New(tp elastictransport.Interface) *OidcLogout { r := &OidcLogout{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -86,6 +122,21 @@ func New(tp elastictransport.Interface) *OidcLogout { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcLogout) Raw(raw io.Reader) *OidcLogout { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcLogout) Request(req *Request) *OidcLogout { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *OidcLogout) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +146,31 @@ func (r *OidcLogout) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcLogout: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -184,13 +260,7 @@ func (r OidcLogout) Perform(providedCtx context.Context) (*http.Response, error) } // Do runs the request through the transport, handle the response and returns a oidclogout.Response -func (r OidcLogout) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcLogout) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcLogout) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -201,30 +271,46 @@ func (r OidcLogout) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcLogout query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcLogout headers map. 
@@ -233,3 +319,73 @@ func (r *OidcLogout) Header(key, value string) *OidcLogout { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcLogout) ErrorTrace(errortrace bool) *OidcLogout { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcLogout) FilterPath(filterpaths ...string) *OidcLogout { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcLogout) Human(human bool) *OidcLogout { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcLogout) Pretty(pretty bool) *OidcLogout { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access token to be invalidated. +// API name: access_token +func (r *OidcLogout) AccessToken(accesstoken string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AccessToken = accesstoken + + return r +} + +// The refresh token to be invalidated. 
+// API name: refresh_token +func (r *OidcLogout) RefreshToken(refreshtoken string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} diff --git a/typedapi/security/oidclogout/request.go b/typedapi/security/oidclogout/request.go new file mode 100644 index 0000000000..be3e50f71c --- /dev/null +++ b/typedapi/security/oidclogout/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidclogout + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_logout/Request.ts#L22-L52 +type Request struct { + + // AccessToken The access token to be invalidated. + AccessToken string `json:"access_token"` + // RefreshToken The refresh token to be invalidated. 
+ RefreshToken *string `json:"refresh_token,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidclogout request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidclogout/response.go b/typedapi/security/oidclogout/response.go new file mode 100644 index 0000000000..e3dbe66951 --- /dev/null +++ b/typedapi/security/oidclogout/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidclogout + +// Response holds the response body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_logout/Response.ts#L20-L27 +type Response struct { + + // Redirect A URI that points to the end session endpoint of the OpenID Connect Provider + // with all the parameters of the logout request as HTTP GET parameters. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go b/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go index 74da18bc23..3f5f93aeec 100644 --- a/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go +++ b/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go @@ -16,21 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates an OAuth 2.0 authentication request as a URL string +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. 
+// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. package oidcprepareauthentication import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +62,10 @@ type OidcPrepareAuthentication struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,14 +86,29 @@ func NewOidcPrepareAuthenticationFunc(tp elastictransport.Interface) NewOidcPrep } } -// Creates an OAuth 2.0 authentication request as a URL string +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication func New(tp elastictransport.Interface) *OidcPrepareAuthentication { r := &OidcPrepareAuthentication{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +120,21 @@ func New(tp elastictransport.Interface) *OidcPrepareAuthentication { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcPrepareAuthentication) Raw(raw io.Reader) *OidcPrepareAuthentication { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcPrepareAuthentication) Request(req *Request) *OidcPrepareAuthentication { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *OidcPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +144,31 @@ func (r *OidcPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcPrepareAuthentication: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +258,7 @@ func (r OidcPrepareAuthentication) Perform(providedCtx context.Context) (*http.R } // Do runs the request through the transport, handle the response and returns a oidcprepareauthentication.Response -func (r OidcPrepareAuthentication) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcPrepareAuthentication) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcPrepareAuthentication) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +269,46 @@ func (r OidcPrepareAuthentication) IsSuccess(providedCtx context.Context) (bool, ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcPrepareAuthentication query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcPrepareAuthentication headers map. 
@@ -231,3 +317,126 @@ func (r *OidcPrepareAuthentication) Header(key, value string) *OidcPrepareAuthen return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcPrepareAuthentication) ErrorTrace(errortrace bool) *OidcPrepareAuthentication { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcPrepareAuthentication) FilterPath(filterpaths ...string) *OidcPrepareAuthentication { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcPrepareAuthentication) Human(human bool) *OidcPrepareAuthentication { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *OidcPrepareAuthentication) Pretty(pretty bool) *OidcPrepareAuthentication { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// In the case of a third party initiated single sign on, this is the issuer +// identifier for the OP that the RP is to send the authentication request to. +// It cannot be specified when *realm* is specified. +// One of *realm* or *iss* is required.
+// API name: iss +func (r *OidcPrepareAuthentication) Iss(iss string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Iss = &iss + + return r +} + +// In the case of a third party initiated single sign on, it is a string value +// that is included in the authentication request as the *login_hint* parameter. +// This parameter is not valid when *realm* is specified. +// API name: login_hint +func (r *OidcPrepareAuthentication) LoginHint(loginhint string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LoginHint = &loginhint + + return r +} + +// The value used to associate a client session with an ID token and to mitigate +// replay attacks. +// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: nonce +func (r *OidcPrepareAuthentication) Nonce(nonce string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = &nonce + + return r +} + +// The name of the OpenID Connect realm in Elasticsearch the configuration of +// which should be used in order to generate the authentication request. +// It cannot be specified when *iss* is specified. +// One of *realm* or *iss* is required. +// API name: realm +func (r *OidcPrepareAuthentication) Realm(realm string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The value used to maintain state between the authentication request and the +// response, typically used as a Cross-Site Request Forgery mitigation. 
+// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: state +func (r *OidcPrepareAuthentication) State(state string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = &state + + return r +} diff --git a/typedapi/security/oidcprepareauthentication/request.go b/typedapi/security/oidcprepareauthentication/request.go new file mode 100644 index 0000000000..a1a679fbc3 --- /dev/null +++ b/typedapi/security/oidcprepareauthentication/request.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidcprepareauthentication + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_prepare_authentication/Request.ts#L22-L71 +type Request struct { + + // Iss In the case of a third party initiated single sign on, this is the issuer + // identifier for the OP that the RP is to send the authentication request to. + // It cannot be specified when *realm* is specified. + // One of *realm* or *iss* is required. + Iss *string `json:"iss,omitempty"` + // LoginHint In the case of a third party initiated single sign on, it is a string value + // that is included in the authentication request as the *login_hint* parameter. + // This parameter is not valid when *realm* is specified. + LoginHint *string `json:"login_hint,omitempty"` + // Nonce The value used to associate a client session with an ID token and to mitigate + // replay attacks. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. + Nonce *string `json:"nonce,omitempty"` + // Realm The name of the OpenID Connect realm in Elasticsearch the configuration of + // which should be used in order to generate the authentication request. + // It cannot be specified when *iss* is specified. + // One of *realm* or *iss* is required. + Realm *string `json:"realm,omitempty"` + // State The value used to maintain state between the authentication request and the + // response, typically used as a Cross-Site Request Forgery mitigation. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. 
+ State *string `json:"state,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcprepareauthentication request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidcprepareauthentication/response.go b/typedapi/security/oidcprepareauthentication/response.go new file mode 100644 index 0000000000..bf07cbd8d1 --- /dev/null +++ b/typedapi/security/oidcprepareauthentication/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package oidcprepareauthentication + +// Response holds the response body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/oidc_prepare_authentication/Response.ts#L20-L30 +type Response struct { + Nonce string `json:"nonce"` + Realm string `json:"realm"` + // Redirect A URI that points to the authorization endpoint of the OpenID Connect + // Provider with all the parameters of the authentication request as HTTP GET + // parameters. + Redirect string `json:"redirect"` + State string `json:"state"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/putprivileges/put_privileges.go b/typedapi/security/putprivileges/put_privileges.go index 4ecb8df9fe..0a86edfa1a 100644 --- a/typedapi/security/putprivileges/put_privileges.go +++ b/typedapi/security/putprivileges/put_privileges.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. +// * The prefix must contain only ASCII letters or digits. 
+// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. +// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. package putprivileges import ( @@ -76,7 +100,31 @@ func NewPutPrivilegesFunc(tp elastictransport.Interface) NewPutPrivileges { // Create or update application privileges. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. +// * The prefix must contain only ASCII letters or digits. +// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. 
+// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges func New(tp elastictransport.Interface) *PutPrivileges { r := &PutPrivileges{ transport: tp, @@ -84,8 +132,6 @@ func New(tp elastictransport.Interface) *PutPrivileges { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/security/putprivileges/request.go b/typedapi/security/putprivileges/request.go index 7ee4caa487..568482a9ef 100644 --- a/typedapi/security/putprivileges/request.go +++ b/typedapi/security/putprivileges/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putprivileges @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L67 type Request = map[string]map[string]types.PrivilegesActions // NewRequest returns a Request diff --git a/typedapi/security/putprivileges/response.go b/typedapi/security/putprivileges/response.go index f3e56f8d4f..226f10519f 100644 --- a/typedapi/security/putprivileges/response.go +++ b/typedapi/security/putprivileges/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L28 type Response map[string]map[string]types.CreatedStatus diff --git a/typedapi/security/putrole/put_role.go b/typedapi/security/putrole/put_role.go index 7ae6b29afc..74fbb2b14f 100644 --- a/typedapi/security/putrole/put_role.go +++ b/typedapi/security/putrole/put_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update roles. // @@ -97,7 +97,7 @@ func NewPutRoleFunc(tp elastictransport.Interface) NewPutRole { // files. // File-based role management is not available in Elastic Serverless. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role func New(tp elastictransport.Interface) *PutRole { r := &PutRole{ transport: tp, @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *PutRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -386,79 +384,164 @@ func (r *PutRole) Pretty(pretty bool) *PutRole { return r } -// Applications A list of application privilege entries. +// A list of application privilege entries. // API name: applications -func (r *PutRole) Applications(applications ...types.ApplicationPrivileges) *PutRole { - r.req.Applications = applications +func (r *PutRole) Applications(applications ...types.ApplicationPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + + r.req.Applications = append(r.req.Applications, *v.ApplicationPrivilegesCaster()) + } return r } -// Cluster A list of cluster privileges. These privileges define the cluster-level +// A list of cluster privileges. These privileges define the cluster-level // actions for users with this role. 
// API name: cluster func (r *PutRole) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *PutRole { - r.req.Cluster = clusters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + + r.req.Cluster = append(r.req.Cluster, v) + } return r } -// Description Optional description of the role descriptor +// Optional description of the role descriptor // API name: description func (r *PutRole) Description(description string) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Global An object defining global privileges. A global privilege is a form of cluster +// An object defining global privileges. A global privilege is a form of cluster // privilege that is request-aware. Support for global privileges is currently // limited to the management of application privileges. // API name: global func (r *PutRole) Global(global map[string]json.RawMessage) *PutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Global = global + return r +} +func (r *PutRole) AddGlobal(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Global == nil { + r.req.Global = make(map[string]json.RawMessage) + } else { + tmp = r.req.Global + } + + tmp[key] = value + + r.req.Global = tmp return r } -// Indices A list of indices permissions entries. +// A list of indices permissions entries. 
// API name: indices -func (r *PutRole) Indices(indices ...types.IndicesPrivileges) *PutRole { - r.req.Indices = indices +func (r *PutRole) Indices(indices ...types.IndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, *v.IndicesPrivilegesCaster()) + } return r } -// Metadata Optional metadata. Within the metadata object, keys that begin with an +// Optional metadata. Within the metadata object, keys that begin with an // underscore (`_`) are reserved for system use. // API name: metadata -func (r *PutRole) Metadata(metadata types.Metadata) *PutRole { - r.req.Metadata = metadata +func (r *PutRole) Metadata(metadata types.MetadataVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// RemoteIndices A list of remote indices permissions entries. +// A list of remote cluster permissions entries. +// API name: remote_cluster +func (r *PutRole) RemoteCluster(remoteclusters ...types.RemoteClusterPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteclusters { + + r.req.RemoteCluster = append(r.req.RemoteCluster, *v.RemoteClusterPrivilegesCaster()) + + } + return r +} + +// A list of remote indices permissions entries. +// +// NOTE: Remote indices are effective for remote clusters configured with the +// API key based model. +// They have no effect for remote clusters configured with the certificate based +// model. 
// API name: remote_indices -func (r *PutRole) RemoteIndices(remoteindices ...types.RemoteIndicesPrivileges) *PutRole { - r.req.RemoteIndices = remoteindices +func (r *PutRole) RemoteIndices(remoteindices ...types.RemoteIndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteindices { + + r.req.RemoteIndices = append(r.req.RemoteIndices, *v.RemoteIndicesPrivilegesCaster()) + } return r } -// RunAs A list of users that the owners of this role can impersonate. *Note*: in +// A list of users that the owners of this role can impersonate. *Note*: in // Serverless, the run-as feature is disabled. For API compatibility, you can // still specify an empty `run_as` field, but a non-empty list will be rejected. // API name: run_as func (r *PutRole) RunAs(runas ...string) *PutRole { - r.req.RunAs = runas + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + } return r } -// TransientMetadata Indicates roles that might be incompatible with the current cluster license, +// Indicates roles that might be incompatible with the current cluster license, // specifically roles with document and field level security. When the cluster // license doesn’t allow certain features for a given role, this parameter is // updated dynamically to list the incompatible features. If `enabled` is @@ -466,8 +549,29 @@ func (r *PutRole) RunAs(runas ...string) *PutRole { // authenticate API. 
// API name: transient_metadata func (r *PutRole) TransientMetadata(transientmetadata map[string]json.RawMessage) *PutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TransientMetadata = transientmetadata + return r +} + +func (r *PutRole) AddTransientMetadatum(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.TransientMetadata == nil { + r.req.TransientMetadata = make(map[string]json.RawMessage) + } else { + tmp = r.req.TransientMetadata + } + + tmp[key] = value + r.req.TransientMetadata = tmp return r } diff --git a/typedapi/security/putrole/request.go b/typedapi/security/putrole/request.go index a799aca27e..2c0f984968 100644 --- a/typedapi/security/putrole/request.go +++ b/typedapi/security/putrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrole @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_role/SecurityPutRoleRequest.ts#L31-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_role/SecurityPutRoleRequest.ts#L32-L111 type Request struct { // Applications A list of application privilege entries. @@ -53,7 +53,14 @@ type Request struct { // Metadata Optional metadata. Within the metadata object, keys that begin with an // underscore (`_`) are reserved for system use. 
Metadata types.Metadata `json:"metadata,omitempty"` + // RemoteCluster A list of remote cluster permissions entries. + RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` // RemoteIndices A list of remote indices permissions entries. + // + // NOTE: Remote indices are effective for remote clusters configured with the + // API key based model. + // They have no effect for remote clusters configured with the certificate based + // model. RemoteIndices []types.RemoteIndicesPrivileges `json:"remote_indices,omitempty"` // RunAs A list of users that the owners of this role can impersonate. *Note*: in // Serverless, the run-as feature is disabled. For API compatibility, you can @@ -144,6 +151,11 @@ func (s *Request) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + case "remote_indices": if err := dec.Decode(&s.RemoteIndices); err != nil { return fmt.Errorf("%s | %w", "RemoteIndices", err) diff --git a/typedapi/security/putrole/response.go b/typedapi/security/putrole/response.go index defbda69a9..6f53a788dd 100644 --- a/typedapi/security/putrole/response.go +++ b/typedapi/security/putrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrole @@ -26,8 +26,10 @@ import ( // Response holds the response body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L29 type Response struct { + + // Role When an existing role is updated, `created` is set to `false`. Role types.CreatedStatus `json:"role"` } diff --git a/typedapi/security/putrolemapping/put_role_mapping.go b/typedapi/security/putrolemapping/put_role_mapping.go index 69e4387bdc..76c40062c8 100644 --- a/typedapi/security/putrolemapping/put_role_mapping.go +++ b/typedapi/security/putrolemapping/put_role_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update role mappings. // @@ -27,8 +27,36 @@ // rather than using role mapping files. The create or update role mappings API // cannot update role mappings that are defined in role mapping files. // -// This API does not create roles. Rather, it maps users to existing roles. +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. // Roles can be created by using the create or update roles API or roles files. 
+// +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. package putrolemapping import ( @@ -101,10 +129,38 @@ func NewPutRoleMappingFunc(tp elastictransport.Interface) NewPutRoleMapping { // rather than using role mapping files. The create or update role mappings API // cannot update role mappings that are defined in role mapping files. // -// This API does not create roles. Rather, it maps users to existing roles. +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. // Roles can be created by using the create or update roles API or roles files. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping func New(tp elastictransport.Interface) *PutRoleMapping { r := &PutRoleMapping{ transport: tp, @@ -112,8 +168,6 @@ func New(tp elastictransport.Interface) *PutRoleMapping { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -327,7 +381,9 @@ func (r *PutRoleMapping) Header(key, value string) *PutRoleMapping { return r } -// Name Role-mapping name +// Name The distinct name that identifies the role mapping. +// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. // API Name: name func (r *PutRoleMapping) _name(name string) *PutRoleMapping { r.paramSet |= nameMask @@ -390,45 +446,93 @@ func (r *PutRoleMapping) Pretty(pretty bool) *PutRoleMapping { return r } +// Mappings that have `enabled` set to `false` are ignored when role mapping is +// performed. // API name: enabled func (r *PutRoleMapping) Enabled(enabled bool) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Enabled = &enabled return r } +// Additional metadata that helps define which roles are assigned to each user. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. 
// API name: metadata -func (r *PutRoleMapping) Metadata(metadata types.Metadata) *PutRoleMapping { - r.req.Metadata = metadata +func (r *PutRoleMapping) Metadata(metadata types.MetadataVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// A list of Mustache templates that will be evaluated to determine the roles +// names that should granted to the users that match the role mapping rules. +// Exactly one of `roles` or `role_templates` must be specified. // API name: role_templates -func (r *PutRoleMapping) RoleTemplates(roletemplates ...types.RoleTemplate) *PutRoleMapping { - r.req.RoleTemplates = roletemplates +func (r *PutRoleMapping) RoleTemplates(roletemplates ...types.RoleTemplateVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roletemplates { + r.req.RoleTemplates = append(r.req.RoleTemplates, *v.RoleTemplateCaster()) + + } return r } +// A list of role names that are granted to the users that match the role +// mapping rules. +// Exactly one of `roles` or `role_templates` must be specified. // API name: roles func (r *PutRoleMapping) Roles(roles ...string) *PutRoleMapping { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + r.req.Roles = append(r.req.Roles, v) + + } return r } +// The rules that determine which users should be matched by the mapping. +// A rule is a logical condition that is expressed by using a JSON DSL. 
// API name: rules -func (r *PutRoleMapping) Rules(rules *types.RoleMappingRule) *PutRoleMapping { +func (r *PutRoleMapping) Rules(rules types.RoleMappingRuleVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Rules = rules + r.req.Rules = rules.RoleMappingRuleCaster() return r } // API name: run_as func (r *PutRoleMapping) RunAs(runas ...string) *PutRoleMapping { - r.req.RunAs = runas + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + } return r } diff --git a/typedapi/security/putrolemapping/request.go b/typedapi/security/putrolemapping/request.go index 7e7b11101c..5512c01773 100644 --- a/typedapi/security/putrolemapping/request.go +++ b/typedapi/security/putrolemapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrolemapping @@ -33,14 +33,28 @@ import ( // Request holds the request body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L25-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L25-L103 type Request struct { - Enabled *bool `json:"enabled,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - RoleTemplates []types.RoleTemplate `json:"role_templates,omitempty"` - Roles []string `json:"roles,omitempty"` - Rules *types.RoleMappingRule `json:"rules,omitempty"` - RunAs []string `json:"run_as,omitempty"` + + // Enabled Mappings that have `enabled` set to `false` are ignored when role mapping is + // performed. + Enabled *bool `json:"enabled,omitempty"` + // Metadata Additional metadata that helps define which roles are assigned to each user. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleTemplates A list of Mustache templates that will be evaluated to determine the roles + // names that should granted to the users that match the role mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. + RoleTemplates []types.RoleTemplate `json:"role_templates,omitempty"` + // Roles A list of role names that are granted to the users that match the role + // mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. 
+ Roles []string `json:"roles,omitempty"` + // Rules The rules that determine which users should be matched by the mapping. + // A rule is a logical condition that is expressed by using a JSON DSL. + Rules *types.RoleMappingRule `json:"rules,omitempty"` + RunAs []string `json:"run_as,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/putrolemapping/response.go b/typedapi/security/putrolemapping/response.go index 5b98cc58be..f73ed65af0 100644 --- a/typedapi/security/putrolemapping/response.go +++ b/typedapi/security/putrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 type Response struct { Created *bool `json:"created,omitempty"` RoleMapping types.CreatedStatus `json:"role_mapping"` diff --git a/typedapi/security/putuser/put_user.go b/typedapi/security/putuser/put_user.go index 561b3f880a..0337baeb2e 100644 --- a/typedapi/security/putuser/put_user.go +++ b/typedapi/security/putuser/put_user.go @@ -16,13 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create or update users. // +// Add and update users in the native realm. // A password is required for adding a new user but is optional when updating an // existing user. -// To change a user’s password without updating any other fields, use the change +// To change a user's password without updating any other fields, use the change // password API. package putuser @@ -89,12 +90,13 @@ func NewPutUserFunc(tp elastictransport.Interface) NewPutUser { // Create or update users. // +// Add and update users in the native realm. // A password is required for adding a new user but is optional when updating an // existing user. -// To change a user’s password without updating any other fields, use the change +// To change a user's password without updating any other fields, use the change // password API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user func New(tp elastictransport.Interface) *PutUser { r := &PutUser{ transport: tp, @@ -102,8 +104,6 @@ func New(tp elastictransport.Interface) *PutUser { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,7 +317,12 @@ func (r *PutUser) Header(key, value string) *PutUser { return r } -// Username The username of the User +// Username An identifier for the user. +// +// NOTE: Usernames must be at least 1 and no more than 507 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, +// punctuation, and printable symbols in the Basic Latin (ASCII) block. +// Leading or trailing whitespace is not allowed. 
// API Name: username func (r *PutUser) _username(username string) *PutUser { r.paramSet |= usernameMask @@ -326,9 +331,9 @@ func (r *PutUser) _username(username string) *PutUser { return r } -// Refresh If `true` (the default) then refresh the affected shards to make this -// operation visible to search, if `wait_for` then wait for a refresh to make -// this operation visible to search, if `false` then do nothing with refreshes. +// Refresh Valid values are `true`, `false`, and `wait_for`. +// These values have the same meaning as in the index API, but the default value +// for this API is true. // API name: refresh func (r *PutUser) Refresh(refresh refresh.Refresh) *PutUser { r.values.Set("refresh", refresh.String()) @@ -380,52 +385,111 @@ func (r *PutUser) Pretty(pretty bool) *PutUser { return r } +// The email of the user. // API name: email func (r *PutUser) Email(email string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Email = &email return r } +// Specifies whether the user is enabled. // API name: enabled func (r *PutUser) Enabled(enabled bool) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Enabled = &enabled return r } +// The full name of the user. // API name: full_name func (r *PutUser) FullName(fullname string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FullName = &fullname return r } +// Arbitrary metadata that you want to associate with the user. 
// API name: metadata -func (r *PutUser) Metadata(metadata types.Metadata) *PutUser { - r.req.Metadata = metadata +func (r *PutUser) Metadata(metadata types.MetadataVariant) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// The user's password. +// Passwords must be at least 6 characters long. +// When adding a user, one of `password` or `password_hash` is required. +// When updating an existing user, the password is optional, so that other +// fields on the user (such as their roles) may be updated without modifying the +// user's password // API name: password func (r *PutUser) Password(password string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } +// A hash of the user's password. +// This must be produced using the same hashing algorithm as has been configured +// for password storage. +// For more details, see the explanation of the +// `xpack.security.authc.password_hashing.algorithm` setting in the user cache +// and password hash algorithm documentation. +// Using this parameter allows the client to pre-hash the password for +// performance and/or confidentiality reasons. +// The `password` parameter and the `password_hash` parameter cannot be used in +// the same request. // API name: password_hash func (r *PutUser) PasswordHash(passwordhash string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PasswordHash = &passwordhash return r } +// A set of roles the user has. +// The roles determine the user's access permissions. +// To create a user without any roles, specify an empty list (`[]`). 
// API name: roles func (r *PutUser) Roles(roles ...string) *PutUser { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + r.req.Roles = append(r.req.Roles, v) + + } return r } diff --git a/typedapi/security/putuser/request.go b/typedapi/security/putuser/request.go index d766fd1208..322dcf6410 100644 --- a/typedapi/security/putuser/request.go +++ b/typedapi/security/putuser/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putuser @@ -33,16 +33,40 @@ import ( // Request holds the request body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_user/SecurityPutUserRequest.ts#L23-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_user/SecurityPutUserRequest.ts#L23-L101 type Request struct { - Email *string `json:"email,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - FullName *string `json:"full_name,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - Password *string `json:"password,omitempty"` - PasswordHash *string `json:"password_hash,omitempty"` - Roles []string `json:"roles,omitempty"` - Username *string `json:"username,omitempty"` + + // Email The email of the user. + Email *string `json:"email,omitempty"` + // Enabled Specifies whether the user is enabled. + Enabled *bool `json:"enabled,omitempty"` + // FullName The full name of the user. 
+ FullName *string `json:"full_name,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the user. + Metadata types.Metadata `json:"metadata,omitempty"` + // Password The user's password. + // Passwords must be at least 6 characters long. + // When adding a user, one of `password` or `password_hash` is required. + // When updating an existing user, the password is optional, so that other + // fields on the user (such as their roles) may be updated without modifying the + // user's password + Password *string `json:"password,omitempty"` + // PasswordHash A hash of the user's password. + // This must be produced using the same hashing algorithm as has been configured + // for password storage. + // For more details, see the explanation of the + // `xpack.security.authc.password_hashing.algorithm` setting in the user cache + // and password hash algorithm documentation. + // Using this parameter allows the client to pre-hash the password for + // performance and/or confidentiality reasons. + // The `password` parameter and the `password_hash` parameter cannot be used in + // the same request. + PasswordHash *string `json:"password_hash,omitempty"` + // Roles A set of roles the user has. + // The roles determine the user's access permissions. + // To create a user without any roles, specify an empty list (`[]`). + Roles []string `json:"roles,omitempty"` + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/putuser/response.go b/typedapi/security/putuser/response.go index 7bd3199a1d..9d318aa0e0 100644 --- a/typedapi/security/putuser/response.go +++ b/typedapi/security/putuser/response.go @@ -16,14 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putuser // Response holds the response body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_user/SecurityPutUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_user/SecurityPutUserResponse.ts#L20-L28 type Response struct { + + // Created A successful call returns a JSON structure that shows whether the user has + // been created or updated. + // When an existing user is updated, `created` is set to `false`. Created bool `json:"created"` } diff --git a/typedapi/security/queryapikeys/query_api_keys.go b/typedapi/security/queryapikeys/query_api_keys.go index 67c4b62f66..84d65188d6 100644 --- a/typedapi/security/queryapikeys/query_api_keys.go +++ b/typedapi/security/queryapikeys/query_api_keys.go @@ -16,12 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Find API keys with a query. // -// Get a paginated list of API keys and their information. You can optionally -// filter the results with a query. +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. 
+// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. package queryapikeys import ( @@ -78,10 +86,18 @@ func NewQueryApiKeysFunc(tp elastictransport.Interface) NewQueryApiKeys { // Find API keys with a query. // -// Get a paginated list of API keys and their information. You can optionally -// filter the results with a query. +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. +// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys func New(tp elastictransport.Interface) *QueryApiKeys { r := &QueryApiKeys{ transport: tp, @@ -89,8 +105,6 @@ func New(tp elastictransport.Interface) *QueryApiKeys { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -305,7 +319,10 @@ func (r *QueryApiKeys) Header(key, value string) *QueryApiKeys { // WithLimitedBy Return the snapshot of the owner user's role descriptors associated with the // API key. // An API key's actual permission is the intersection of its assigned role -// descriptors and the owner user's role descriptors. +// descriptors and the owner user's role descriptors (effectively limited by +// it). 
+// An API key cannot retrieve any API key’s limited-by role descriptors +// (including itself) unless it has `manage_api_key` or higher privileges. // API name: with_limited_by func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) @@ -313,8 +330,10 @@ func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { return r } -// WithProfileUid Determines whether to also retrieve the profile uid, for the API key owner -// principal, if it exists. +// WithProfileUid Determines whether to also retrieve the profile UID for the API key owner +// principal. +// If it exists, the profile UID is returned under the `profile_uid` response +// field for each API key. // API name: with_profile_uid func (r *QueryApiKeys) WithProfileUid(withprofileuid bool) *QueryApiKeys { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) @@ -375,7 +394,7 @@ func (r *QueryApiKeys) Pretty(pretty bool) *QueryApiKeys { return r } -// Aggregations Any aggregations to run over the corpus of returned API keys. +// Any aggregations to run over the corpus of returned API keys. // Aggregations and queries work together. Aggregations are computed only on the // API keys that match the query. // This supports only a subset of aggregation types, namely: `terms`, `range`, @@ -385,24 +404,51 @@ func (r *QueryApiKeys) Pretty(pretty bool) *QueryApiKeys { // works with. 
// API name: aggregations func (r *QueryApiKeys) Aggregations(aggregations map[string]types.ApiKeyAggregationContainer) *QueryApiKeys { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} +func (r *QueryApiKeys) AddAggregation(key string, value types.ApiKeyAggregationContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ApiKeyAggregationContainer + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.ApiKeyAggregationContainer) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.ApiKeyAggregationContainerCaster() + + r.req.Aggregations = tmp return r } -// From Starting document offset. -// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *QueryApiKeys) From(from int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which API keys to return. +// A query to filter which API keys to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, @@ -411,39 +457,73 @@ func (r *QueryApiKeys) From(from int) *QueryApiKeys { // `id`, `type`, `name`, // `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, // and `metadata`. +// +// NOTE: The queryable string values associated with API keys are internally +// mapped as keywords. 
+// Consequently, if no `analyzer` parameter is specified for a `match` query, +// then the provided match query string is interpreted as a single keyword +// value. +// Such a match query is hence equivalent to a `term` query. // API name: query -func (r *QueryApiKeys) Query(query *types.ApiKeyQueryContainer) *QueryApiKeys { +func (r *QueryApiKeys) Query(query types.ApiKeyQueryContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.ApiKeyQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition. // API name: search_after -func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValue) *QueryApiKeys { - r.req.SearchAfter = sortresults +func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValueVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. +// The number of hits to return. +// It must not be negative. +// The `size` parameter can be set to `0`, in which case no API key matches are +// returned, only the aggregation results. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: size func (r *QueryApiKeys) Size(size int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Other than `id`, all public fields of an API key are eligible for sorting. +// The sort definition. +// Other than `id`, all public fields of an API key are eligible for sorting. 
// In addition, sort can also be applied to the `_doc` field to sort by index // order. // API name: sort -func (r *QueryApiKeys) Sort(sorts ...types.SortCombinations) *QueryApiKeys { - r.req.Sort = sorts +func (r *QueryApiKeys) Sort(sorts ...types.SortCombinationsVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryapikeys/request.go b/typedapi/security/queryapikeys/request.go index ea419eb439..f93480ae55 100644 --- a/typedapi/security/queryapikeys/request.go +++ b/typedapi/security/queryapikeys/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryapikeys @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/QueryApiKeysRequest.ts#L26-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/QueryApiKeysRequest.ts#L26-L124 type Request struct { // Aggregations Any aggregations to run over the corpus of returned API keys. @@ -45,9 +45,10 @@ type Request struct { // Additionally, aggregations only run over the same subset of fields that query // works with. Aggregations map[string]types.ApiKeyAggregationContainer `json:"aggregations,omitempty"` - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. 
+ // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which API keys to return. @@ -59,15 +60,26 @@ type Request struct { // `id`, `type`, `name`, // `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, // and `metadata`. + // + // NOTE: The queryable string values associated with API keys are internally + // mapped as keywords. + // Consequently, if no `analyzer` parameter is specified for a `match` query, + // then the provided match query string is interpreted as a single keyword + // value. + // Such a match query is hence equivalent to a `term` query. Query *types.ApiKeyQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition. SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. + // The `size` parameter can be set to `0`, in which case no API key matches are + // returned, only the aggregation results. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. Size *int `json:"size,omitempty"` - // Sort Other than `id`, all public fields of an API key are eligible for sorting. + // Sort The sort definition. + // Other than `id`, all public fields of an API key are eligible for sorting. // In addition, sort can also be applied to the `_doc` field to sort by index // order. 
Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryapikeys/response.go b/typedapi/security/queryapikeys/response.go index d57359c2e7..ec6d838486 100644 --- a/typedapi/security/queryapikeys/response.go +++ b/typedapi/security/queryapikeys/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryapikeys @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/QueryApiKeysResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/QueryApiKeysResponse.ts#L26-L45 type Response struct { // Aggregations The aggregations result, if requested. diff --git a/typedapi/security/queryrole/query_role.go b/typedapi/security/queryrole/query_role.go index 84cab2f32a..c50a9df3b1 100644 --- a/typedapi/security/queryrole/query_role.go +++ b/typedapi/security/queryrole/query_role.go @@ -16,12 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Find roles with a query. // -// Get roles in a paginated manner. You can optionally filter the results with a -// query. +// Get roles in a paginated manner. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. 
+// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. +// Also, the results can be paginated and sorted. package queryrole import ( @@ -78,10 +83,15 @@ func NewQueryRoleFunc(tp elastictransport.Interface) NewQueryRole { // Find roles with a query. // -// Get roles in a paginated manner. You can optionally filter the results with a -// query. +// Get roles in a paginated manner. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. +// Also, the results can be paginated and sorted. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role func New(tp elastictransport.Interface) *QueryRole { r := &QueryRole{ transport: tp, @@ -89,8 +99,6 @@ func New(tp elastictransport.Interface) *QueryRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,59 +352,90 @@ func (r *QueryRole) Pretty(pretty bool) *QueryRole { return r } -// From Starting document offset. -// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. 
// API name: from func (r *QueryRole) From(from int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which roles to return. +// A query to filter which roles to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with roles: `name`, // `description`, `metadata`, -// `applications.application`, `applications.privileges`, +// `applications.application`, `applications.privileges`, and // `applications.resources`. // API name: query -func (r *QueryRole) Query(query *types.RoleQueryContainer) *QueryRole { +func (r *QueryRole) Query(query types.RoleQueryContainerVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.RoleQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition. // API name: search_after -func (r *QueryRole) SearchAfter(sortresults ...types.FieldValue) *QueryRole { - r.req.SearchAfter = sortresults +func (r *QueryRole) SearchAfter(sortresults ...types.FieldValueVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. +// The number of hits to return. +// It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. 
// API name: size func (r *QueryRole) Size(size int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort All public fields of a role are eligible for sorting. +// The sort definition. +// You can sort on `username`, `roles`, or `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. // API name: sort -func (r *QueryRole) Sort(sorts ...types.SortCombinations) *QueryRole { - r.req.Sort = sorts +func (r *QueryRole) Sort(sorts ...types.SortCombinationsVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryrole/request.go b/typedapi/security/queryrole/request.go index b4af212487..29908c0f9b 100644 --- a/typedapi/security/queryrole/request.go +++ b/typedapi/security/queryrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryrole @@ -33,12 +33,13 @@ import ( // Request holds the request body struct for the package queryrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_role/QueryRolesRequest.ts#L25-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_role/QueryRolesRequest.ts#L25-L85 type Request struct { - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. 
+ // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which roles to return. @@ -48,17 +49,19 @@ type Request struct { // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with roles: `name`, // `description`, `metadata`, - // `applications.application`, `applications.privileges`, + // `applications.application`, `applications.privileges`, and // `applications.resources`. Query *types.RoleQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition. SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. Size *int `json:"size,omitempty"` - // Sort All public fields of a role are eligible for sorting. + // Sort The sort definition. + // You can sort on `username`, `roles`, or `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryrole/response.go b/typedapi/security/queryrole/response.go index 23231237c3..0c423b4eb5 100644 --- a/typedapi/security/queryrole/response.go +++ b/typedapi/security/queryrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryrole @@ -26,12 +26,19 @@ import ( // Response holds the response body struct for the package queryrole // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_role/QueryRolesResponse.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_role/QueryRolesResponse.ts#L23-L43 type Response struct { // Count The number of roles returned in the response. Count int `json:"count"` - // Roles The list of roles. + // Roles A list of roles that match the query. + // The returned role format is an extension of the role definition format. + // It adds the `transient_metadata.enabled` and the `_sort` fields. + // `transient_metadata.enabled` is set to `false` in case the role is + // automatically disabled, for example when the role grants privileges that are + // not allowed by the installed license. + // `_sort` is present when the search query sorts on some field. + // It contains the array of values that have been used for sorting. Roles []types.QueryRole `json:"roles"` // Total The total number of roles found. Total int `json:"total"` diff --git a/typedapi/security/queryuser/query_user.go b/typedapi/security/queryuser/query_user.go index 946e9d3895..6d603b0f2e 100644 --- a/typedapi/security/queryuser/query_user.go +++ b/typedapi/security/queryuser/query_user.go @@ -16,12 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Find users with a query. // // Get information for users in a paginated manner. // You can optionally filter the results with a query. +// +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. package queryuser import ( @@ -81,7 +85,11 @@ func NewQueryUserFunc(tp elastictransport.Interface) NewQueryUser { // Get information for users in a paginated manner. // You can optionally filter the results with a query. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user func New(tp elastictransport.Interface) *QueryUser { r := &QueryUser{ transport: tp, @@ -89,8 +97,6 @@ func New(tp elastictransport.Interface) *QueryUser { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -300,8 +306,8 @@ func (r *QueryUser) Header(key, value string) *QueryUser { return r } -// WithProfileUid If true will return the User Profile ID for the users in the query result, if -// any. +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. // API name: with_profile_uid func (r *QueryUser) WithProfileUid(withprofileuid bool) *QueryUser { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) @@ -353,57 +359,88 @@ func (r *QueryUser) Pretty(pretty bool) *QueryUser { return r } -// From Starting document offset. 
-// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *QueryUser) From(from int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which users to return. +// A query to filter which users to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with user: `username`, -// `roles`, `enabled` +// `roles`, `enabled`, `full_name`, and `email`. // API name: query -func (r *QueryUser) Query(query *types.UserQueryContainer) *QueryUser { +func (r *QueryUser) Query(query types.UserQueryContainerVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.UserQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition // API name: search_after -func (r *QueryUser) SearchAfter(sortresults ...types.FieldValue) *QueryUser { - r.req.SearchAfter = sortresults +func (r *QueryUser) SearchAfter(sortresults ...types.FieldValueVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. 
+// The number of hits to return. +// It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: size func (r *QueryUser) Size(size int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Fields eligible for sorting are: username, roles, enabled +// The sort definition. +// Fields eligible for sorting are: `username`, `roles`, `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. // API name: sort -func (r *QueryUser) Sort(sorts ...types.SortCombinations) *QueryUser { - r.req.Sort = sorts +func (r *QueryUser) Sort(sorts ...types.SortCombinationsVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryuser/request.go b/typedapi/security/queryuser/request.go index 92e0d3e95d..2a7e6b1fba 100644 --- a/typedapi/security/queryuser/request.go +++ b/typedapi/security/queryuser/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryuser @@ -33,12 +33,13 @@ import ( // Request holds the request body struct for the package queryuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_user/SecurityQueryUserRequest.ts#L25-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_user/SecurityQueryUserRequest.ts#L25-L91 type Request struct { - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. + // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which users to return. @@ -47,16 +48,18 @@ type Request struct { // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with user: `username`, - // `roles`, `enabled` + // `roles`, `enabled`, `full_name`, and `email`. Query *types.UserQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. 
Size *int `json:"size,omitempty"` - // Sort Fields eligible for sorting are: username, roles, enabled + // Sort The sort definition. + // Fields eligible for sorting are: `username`, `roles`, `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryuser/response.go b/typedapi/security/queryuser/response.go index 7e18f55da8..832043cff0 100644 --- a/typedapi/security/queryuser/response.go +++ b/typedapi/security/queryuser/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package queryuser @@ -26,14 +26,14 @@ import ( // Response holds the response body struct for the package queryuser // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_user/SecurityQueryUserResponse.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_user/SecurityQueryUserResponse.ts#L23-L38 type Response struct { // Count The number of users returned in the response. Count int `json:"count"` // Total The total number of users found. Total int `json:"total"` - // Users A list of user information. + // Users A list of users that match the query. Users []types.QueryUser `json:"users"` } diff --git a/typedapi/security/samlauthenticate/request.go b/typedapi/security/samlauthenticate/request.go index d0a7fcae82..0ec106953b 100644 --- a/typedapi/security/samlauthenticate/request.go +++ b/typedapi/security/samlauthenticate/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlauthenticate @@ -31,13 +31,13 @@ import ( // Request holds the request body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_authenticate/Request.ts#L23-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_authenticate/Request.ts#L23-L61 type Request struct { - // Content The SAML response as it was sent by the user’s browser, usually a Base64 + // Content The SAML response as it was sent by the user's browser, usually a Base64 // encoded XML document. Content string `json:"content"` - // Ids A json array with all the valid SAML Request Ids that the caller of the API + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. Ids []string `json:"ids"` // Realm The name of the realm that should authenticate the SAML response. Useful in diff --git a/typedapi/security/samlauthenticate/response.go b/typedapi/security/samlauthenticate/response.go index a967995028..346cfee17f 100644 --- a/typedapi/security/samlauthenticate/response.go +++ b/typedapi/security/samlauthenticate/response.go @@ -16,19 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlauthenticate // Response holds the response body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_authenticate/Response.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_authenticate/Response.ts#L22-L45 type Response struct { - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - Realm string `json:"realm"` + + // AccessToken The access token that was generated by Elasticsearch. + AccessToken string `json:"access_token"` + // ExpiresIn The amount of time (in seconds) left until the token expires. + ExpiresIn int `json:"expires_in"` + // Realm The name of the realm where the user was authenticated. + Realm string `json:"realm"` + // RefreshToken The refresh token that was generated by Elasticsearch. RefreshToken string `json:"refresh_token"` - Username string `json:"username"` + // Username The authenticated user's name. + Username string `json:"username"` } // NewResponse returns a Response diff --git a/typedapi/security/samlauthenticate/saml_authenticate.go b/typedapi/security/samlauthenticate/saml_authenticate.go index bafd35558f..3b0a292c65 100644 --- a/typedapi/security/samlauthenticate/saml_authenticate.go +++ b/typedapi/security/samlauthenticate/saml_authenticate.go @@ -16,11 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Authenticate SAML. // -// Submits a SAML response message to Elasticsearch for consumption. +// Submit a SAML response message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML message that is submitted can be: +// +// * A response to a SAML authentication request that was previously created +// using the SAML prepare authentication API. +// * An unsolicited SAML message in the case of an IdP-initiated single sign-on +// (SSO) flow. +// +// In either case, the SAML message needs to be a base64 encoded XML document +// with a root element of ``. +// +// After successful validation, Elasticsearch responds with an Elasticsearch +// internal access token and refresh token that can be subsequently used for +// authentication. +// This API endpoint essentially exchanges SAML responses that indicate +// successful authentication in the IdP for Elasticsearch access and refresh +// tokens, which can be used for authentication against Elasticsearch. package samlauthenticate import ( @@ -77,9 +99,31 @@ func NewSamlAuthenticateFunc(tp elastictransport.Interface) NewSamlAuthenticate // Authenticate SAML. // -// Submits a SAML response message to Elasticsearch for consumption. +// Submit a SAML response message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. 
+// +// The SAML message that is submitted can be: +// +// * A response to a SAML authentication request that was previously created +// using the SAML prepare authentication API. +// * An unsolicited SAML message in the case of an IdP-initiated single sign-on +// (SSO) flow. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html +// In either case, the SAML message needs to be a base64 encoded XML document +// with a root element of ``. +// +// After successful validation, Elasticsearch responds with an Elasticsearch +// internal access token and refresh token that can be subsequently used for +// authentication. +// This API endpoint essentially exchanges SAML responses that indicate +// successful authentication in the IdP for Elasticsearch access and refresh +// tokens, which can be used for authentication against Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate func New(tp elastictransport.Interface) *SamlAuthenticate { r := &SamlAuthenticate{ transport: tp, @@ -87,8 +131,6 @@ func New(tp elastictransport.Interface) *SamlAuthenticate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,29 +384,42 @@ func (r *SamlAuthenticate) Pretty(pretty bool) *SamlAuthenticate { return r } -// Content The SAML response as it was sent by the user’s browser, usually a Base64 +// The SAML response as it was sent by the user's browser, usually a Base64 // encoded XML document. 
// API name: content func (r *SamlAuthenticate) Content(content string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Content = content return r } -// Ids A json array with all the valid SAML Request Ids that the caller of the API +// A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. // API name: ids func (r *SamlAuthenticate) Ids(ids ...string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r } -// Realm The name of the realm that should authenticate the SAML response. Useful in +// The name of the realm that should authenticate the SAML response. Useful in // cases where many SAML realms are defined. // API name: realm func (r *SamlAuthenticate) Realm(realm string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm diff --git a/typedapi/security/samlcompletelogout/request.go b/typedapi/security/samlcompletelogout/request.go index e9c8bf037f..2d599078bd 100644 --- a/typedapi/security/samlcompletelogout/request.go +++ b/typedapi/security/samlcompletelogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlcompletelogout @@ -31,14 +31,14 @@ import ( // Request holds the request body struct for the package samlcompletelogout // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_complete_logout/Request.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_complete_logout/Request.ts#L23-L61 type Request struct { // Content If the SAML IdP sends the logout response with the HTTP-Post binding, this // field must be set to the value of the SAMLResponse form parameter from the // logout response. Content *string `json:"content,omitempty"` - // Ids A json array with all the valid SAML Request Ids that the caller of the API + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. Ids []string `json:"ids"` // QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, diff --git a/typedapi/security/samlcompletelogout/saml_complete_logout.go b/typedapi/security/samlcompletelogout/saml_complete_logout.go index f7475ec9b4..91159209d0 100644 --- a/typedapi/security/samlcompletelogout/saml_complete_logout.go +++ b/typedapi/security/samlcompletelogout/saml_complete_logout.go @@ -16,11 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Logout of SAML completely. // // Verifies the logout response sent from the SAML IdP. 
+// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. +// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. package samlcompletelogout import ( @@ -78,7 +93,22 @@ func NewSamlCompleteLogoutFunc(tp elastictransport.Interface) NewSamlCompleteLog // // Verifies the logout response sent from the SAML IdP. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. +// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout func New(tp elastictransport.Interface) *SamlCompleteLogout { r := &SamlCompleteLogout{ transport: tp, @@ -86,8 +116,6 @@ func New(tp elastictransport.Interface) *SamlCompleteLogout { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -287,40 +315,57 @@ func (r *SamlCompleteLogout) Pretty(pretty bool) *SamlCompleteLogout { return r } -// Content If the SAML IdP sends the logout response with the HTTP-Post binding, this +// If the SAML IdP sends the logout response with the HTTP-Post binding, this // field must be set to the value of the SAMLResponse form parameter from the // logout response. // API name: content func (r *SamlCompleteLogout) Content(content string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Content = &content return r } -// Ids A json array with all the valid SAML Request Ids that the caller of the API +// A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. // API name: ids func (r *SamlCompleteLogout) Ids(ids ...string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r } -// QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, +// If the SAML IdP sends the logout response with the HTTP-Redirect binding, // this field must be set to the query string of the redirect URI. 
// API name: query_string func (r *SamlCompleteLogout) QueryString(querystring string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.QueryString = &querystring return r } -// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// The name of the SAML realm in Elasticsearch for which the configuration is // used to verify the logout response. // API name: realm func (r *SamlCompleteLogout) Realm(realm string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = realm diff --git a/typedapi/security/samlinvalidate/request.go b/typedapi/security/samlinvalidate/request.go index 1bcb851abb..31ac8bc320 100644 --- a/typedapi/security/samlinvalidate/request.go +++ b/typedapi/security/samlinvalidate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlinvalidate @@ -27,28 +27,28 @@ import ( // Request holds the request body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_invalidate/Request.ts#L22-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_invalidate/Request.ts#L22-L61 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realm in // Elasticsearch that should be used. You must specify either this parameter or - // the realm parameter. + // the `realm` parameter. 
Acs *string `json:"acs,omitempty"` // QueryString The query part of the URL that the user was redirected to by the SAML IdP to // initiate the Single Logout. - // This query should include a single parameter named SAMLRequest that contains - // a SAML logout request that is deflated and Base64 encoded. + // This query should include a single parameter named `SAMLRequest` that + // contains a SAML logout request that is deflated and Base64 encoded. // If the SAML IdP has signed the logout request, the URL should include two - // extra parameters named SigAlg and Signature that contain the algorithm used - // for the signature and the signature value itself. - // In order for Elasticsearch to be able to verify the IdP’s signature, the - // value of the query_string field must be an exact match to the string provided - // by the browser. + // extra parameters named `SigAlg` and `Signature` that contain the algorithm + // used for the signature and the signature value itself. + // In order for Elasticsearch to be able to verify the IdP's signature, the + // value of the `query_string` field must be an exact match to the string + // provided by the browser. // The client application must not attempt to parse or process the string in any // way. QueryString string `json:"query_string"` // Realm The name of the SAML realm in Elasticsearch the configuration. You must - // specify either this parameter or the acs parameter. + // specify either this parameter or the `acs` parameter. Realm *string `json:"realm,omitempty"` } diff --git a/typedapi/security/samlinvalidate/response.go b/typedapi/security/samlinvalidate/response.go index 8865b4dee2..c4e3adcea1 100644 --- a/typedapi/security/samlinvalidate/response.go +++ b/typedapi/security/samlinvalidate/response.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlinvalidate // Response holds the response body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_invalidate/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_invalidate/Response.ts#L22-L37 type Response struct { - Invalidated int `json:"invalidated"` - Realm string `json:"realm"` - Redirect string `json:"redirect"` + + // Invalidated The number of tokens that were invalidated as part of this logout. + Invalidated int `json:"invalidated"` + // Realm The realm name of the SAML realm in Elasticsearch that authenticated the + // user. + Realm string `json:"realm"` + // Redirect A SAML logout response as a parameter so that the user can be redirected back + // to the SAML IdP. + Redirect string `json:"redirect"` } // NewResponse returns a Response diff --git a/typedapi/security/samlinvalidate/saml_invalidate.go b/typedapi/security/samlinvalidate/saml_invalidate.go index 1f7bd8ff7a..463c1625ce 100644 --- a/typedapi/security/samlinvalidate/saml_invalidate.go +++ b/typedapi/security/samlinvalidate/saml_invalidate.go @@ -16,11 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Invalidate SAML. // -// Submits a SAML LogoutRequest message to Elasticsearch for consumption. +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. 
+// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. +// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. package samlinvalidate import ( @@ -77,9 +91,23 @@ func NewSamlInvalidateFunc(tp elastictransport.Interface) NewSamlInvalidate { // Invalidate SAML. // -// Submits a SAML LogoutRequest message to Elasticsearch for consumption. +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. +// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate func New(tp elastictransport.Interface) *SamlInvalidate { r := &SamlInvalidate{ transport: tp, @@ -87,8 +115,6 @@ func New(tp elastictransport.Interface) *SamlInvalidate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,41 +368,53 @@ func (r *SamlInvalidate) Pretty(pretty bool) *SamlInvalidate { return r } -// Acs The Assertion Consumer Service URL that matches the one of the SAML realm in +// The Assertion Consumer Service URL that matches the one of the SAML realm in // Elasticsearch that should be used. You must specify either this parameter or -// the realm parameter. +// the `realm` parameter. // API name: acs func (r *SamlInvalidate) Acs(acs string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Acs = &acs return r } -// QueryString The query part of the URL that the user was redirected to by the SAML IdP to +// The query part of the URL that the user was redirected to by the SAML IdP to // initiate the Single Logout. -// This query should include a single parameter named SAMLRequest that contains -// a SAML logout request that is deflated and Base64 encoded. +// This query should include a single parameter named `SAMLRequest` that +// contains a SAML logout request that is deflated and Base64 encoded. // If the SAML IdP has signed the logout request, the URL should include two -// extra parameters named SigAlg and Signature that contain the algorithm used -// for the signature and the signature value itself. 
-// In order for Elasticsearch to be able to verify the IdP’s signature, the -// value of the query_string field must be an exact match to the string provided -// by the browser. +// extra parameters named `SigAlg` and `Signature` that contain the algorithm +// used for the signature and the signature value itself. +// In order for Elasticsearch to be able to verify the IdP's signature, the +// value of the `query_string` field must be an exact match to the string +// provided by the browser. // The client application must not attempt to parse or process the string in any // way. // API name: query_string func (r *SamlInvalidate) QueryString(querystring string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.QueryString = querystring return r } -// Realm The name of the SAML realm in Elasticsearch the configuration. You must -// specify either this parameter or the acs parameter. +// The name of the SAML realm in Elasticsearch the configuration. You must +// specify either this parameter or the `acs` parameter. // API name: realm func (r *SamlInvalidate) Realm(realm string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm diff --git a/typedapi/security/samllogout/request.go b/typedapi/security/samllogout/request.go index 3a435b1a38..3e30c09182 100644 --- a/typedapi/security/samllogout/request.go +++ b/typedapi/security/samllogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samllogout @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_logout/Request.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_logout/Request.ts#L22-L57 type Request struct { // RefreshToken The refresh token that was returned as a response to calling the SAML @@ -38,7 +38,7 @@ type Request struct { // Token The access token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent token that was received after refreshing the - // original one by using a refresh_token. + // original one by using a `refresh_token`. Token string `json:"token"` } diff --git a/typedapi/security/samllogout/response.go b/typedapi/security/samllogout/response.go index f9a4b5f490..09571d42b3 100644 --- a/typedapi/security/samllogout/response.go +++ b/typedapi/security/samllogout/response.go @@ -16,14 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samllogout // Response holds the response body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_logout/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_logout/Response.ts#L20-L28 type Response struct { + + // Redirect A URL that contains a SAML logout request as a parameter. + // You can use this URL to be redirected back to the SAML IdP and to initiate + // Single Logout. Redirect string `json:"redirect"` } diff --git a/typedapi/security/samllogout/saml_logout.go b/typedapi/security/samllogout/saml_logout.go index 4c156e3091..c546f73876 100644 --- a/typedapi/security/samllogout/saml_logout.go +++ b/typedapi/security/samllogout/saml_logout.go @@ -16,11 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Logout of SAML. // // Submits a request to invalidate an access token and refresh token. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. 
+// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). package samllogout import ( @@ -79,7 +91,19 @@ func NewSamlLogoutFunc(tp elastictransport.Interface) NewSamlLogout { // // Submits a request to invalidate an access token and refresh token. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. +// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout func New(tp elastictransport.Interface) *SamlLogout { r := &SamlLogout{ transport: tp, @@ -87,8 +111,6 @@ func New(tp elastictransport.Interface) *SamlLogout { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,24 +364,32 @@ func (r *SamlLogout) Pretty(pretty bool) *SamlLogout { return r } -// RefreshToken The refresh token that was returned as a response to calling the SAML +// The refresh token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent refresh token that was received after // refreshing the original access token. 
// API name: refresh_token func (r *SamlLogout) RefreshToken(refreshtoken string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } -// Token The access token that was returned as a response to calling the SAML +// The access token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent token that was received after refreshing the -// original one by using a refresh_token. +// original one by using a `refresh_token`. // API name: token func (r *SamlLogout) Token(token string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Token = token diff --git a/typedapi/security/samlprepareauthentication/request.go b/typedapi/security/samlprepareauthentication/request.go index 066d79420b..4d19e784ff 100644 --- a/typedapi/security/samlprepareauthentication/request.go +++ b/typedapi/security/samlprepareauthentication/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlprepareauthentication @@ -27,20 +27,20 @@ import ( // Request holds the request body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_prepare_authentication/Request.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_prepare_authentication/Request.ts#L22-L67 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realms in // Elasticsearch. // The realm is used to generate the authentication request. You must specify - // either this parameter or the realm parameter. + // either this parameter or the `realm` parameter. Acs *string `json:"acs,omitempty"` // Realm The name of the SAML realm in Elasticsearch for which the configuration is // used to generate the authentication request. - // You must specify either this parameter or the acs parameter. + // You must specify either this parameter or the `acs` parameter. Realm *string `json:"realm,omitempty"` // RelayState A string that will be included in the redirect URL that this API returns as - // the RelayState query parameter. + // the `RelayState` query parameter. // If the Authentication Request is signed, this value is used as part of the // signature computation. 
RelayState *string `json:"relay_state,omitempty"` diff --git a/typedapi/security/samlprepareauthentication/response.go b/typedapi/security/samlprepareauthentication/response.go index 620a3fd794..b44efeb273 100644 --- a/typedapi/security/samlprepareauthentication/response.go +++ b/typedapi/security/samlprepareauthentication/response.go @@ -16,16 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlprepareauthentication // Response holds the response body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_prepare_authentication/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_prepare_authentication/Response.ts#L22-L37 type Response struct { - Id string `json:"id"` - Realm string `json:"realm"` + + // Id A unique identifier for the SAML Request to be stored by the caller of the + // API. + Id string `json:"id"` + // Realm The name of the Elasticsearch realm that was used to construct the + // authentication request. + Realm string `json:"realm"` + // Redirect The URL to redirect the user to. Redirect string `json:"redirect"` } diff --git a/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go b/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go index abcc07ed3c..767502a7ef 100644 --- a/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go +++ b/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go @@ -16,12 +16,31 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Prepare SAML authentication. // -// Creates a SAML authentication request (``) as a URL string, -// based on the configuration of the respective SAML realm in Elasticsearch. +// Create a SAML authentication request (``) as a URL string based +// on the configuration of the respective SAML realm in Elasticsearch. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API returns a URL pointing to the SAML Identity Provider. +// You can use the URL to redirect the browser of the user in order to continue +// the authentication process. +// The URL includes a single parameter named `SAMLRequest`, which contains a +// SAML Authentication request that is deflated and Base64 encoded. +// If the configuration dictates that SAML authentication requests should be +// signed, the URL has two extra parameters named `SigAlg` and `Signature`. +// These parameters contain the algorithm used for the signature and the +// signature value itself. +// It also returns a random string that uniquely identifies this SAML +// Authentication request. +// The caller of this API needs to store this identifier as it needs to be used +// in a following step of the authentication process. package samlprepareauthentication import ( @@ -78,10 +97,29 @@ func NewSamlPrepareAuthenticationFunc(tp elastictransport.Interface) NewSamlPrep // Prepare SAML authentication. // -// Creates a SAML authentication request (``) as a URL string, -// based on the configuration of the respective SAML realm in Elasticsearch. 
+// Create a SAML authentication request (``) as a URL string based +// on the configuration of the respective SAML realm in Elasticsearch. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API returns a URL pointing to the SAML Identity Provider. +// You can use the URL to redirect the browser of the user in order to continue +// the authentication process. +// The URL includes a single parameter named `SAMLRequest`, which contains a +// SAML Authentication request that is deflated and Base64 encoded. +// If the configuration dictates that SAML authentication requests should be +// signed, the URL has two extra parameters named `SigAlg` and `Signature`. +// These parameters contain the algorithm used for the signature and the +// signature value itself. +// It also returns a random string that uniquely identifies this SAML +// Authentication request. +// The caller of this API needs to store this identifier as it needs to be used +// in a following step of the authentication process. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication func New(tp elastictransport.Interface) *SamlPrepareAuthentication { r := &SamlPrepareAuthentication{ transport: tp, @@ -89,8 +127,6 @@ func New(tp elastictransport.Interface) *SamlPrepareAuthentication { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,35 +380,47 @@ func (r *SamlPrepareAuthentication) Pretty(pretty bool) *SamlPrepareAuthenticati return r } -// Acs The Assertion Consumer Service URL that matches the one of the SAML realms in +// The Assertion Consumer Service URL that matches the one of the SAML realms in // Elasticsearch. // The realm is used to generate the authentication request. You must specify -// either this parameter or the realm parameter. +// either this parameter or the `realm` parameter. // API name: acs func (r *SamlPrepareAuthentication) Acs(acs string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Acs = &acs return r } -// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// The name of the SAML realm in Elasticsearch for which the configuration is // used to generate the authentication request. -// You must specify either this parameter or the acs parameter. +// You must specify either this parameter or the `acs` parameter. 
// API name: realm func (r *SamlPrepareAuthentication) Realm(realm string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm return r } -// RelayState A string that will be included in the redirect URL that this API returns as -// the RelayState query parameter. +// A string that will be included in the redirect URL that this API returns as +// the `RelayState` query parameter. // If the Authentication Request is signed, this value is used as part of the // signature computation. // API name: relay_state func (r *SamlPrepareAuthentication) RelayState(relaystate string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RelayState = &relaystate diff --git a/typedapi/security/samlserviceprovidermetadata/response.go b/typedapi/security/samlserviceprovidermetadata/response.go index 8d00f34899..d634e68b3e 100644 --- a/typedapi/security/samlserviceprovidermetadata/response.go +++ b/typedapi/security/samlserviceprovidermetadata/response.go @@ -16,14 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package samlserviceprovidermetadata // Response holds the response body struct for the package samlserviceprovidermetadata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/saml_service_provider_metadata/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/saml_service_provider_metadata/Response.ts#L20-L27 type Response struct { + + // Metadata An XML string that contains a SAML Service Provider's metadata for the realm. Metadata string `json:"metadata"` } diff --git a/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go b/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go index 534747e418..b116acc06e 100644 --- a/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go +++ b/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go @@ -16,11 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create SAML service provider metadata. // // Generate SAML metadata for a SAML 2.0 Service Provider. +// +// The SAML 2.0 specification provides a mechanism for Service Providers to +// describe their capabilities and configuration using a metadata file. +// This API generates Service Provider metadata based on the configuration of a +// SAML realm in Elasticsearch. 
package samlserviceprovidermetadata import ( @@ -82,7 +87,12 @@ func NewSamlServiceProviderMetadataFunc(tp elastictransport.Interface) NewSamlSe // // Generate SAML metadata for a SAML 2.0 Service Provider. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html +// The SAML 2.0 specification provides a mechanism for Service Providers to +// describe their capabilities and configuration using a metadata file. +// This API generates Service Provider metadata based on the configuration of a +// SAML realm in Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata func New(tp elastictransport.Interface) *SamlServiceProviderMetadata { r := &SamlServiceProviderMetadata{ transport: tp, diff --git a/typedapi/security/suggestuserprofiles/request.go b/typedapi/security/suggestuserprofiles/request.go index 79d846cf50..f6622851d2 100644 --- a/typedapi/security/suggestuserprofiles/request.go +++ b/typedapi/security/suggestuserprofiles/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package suggestuserprofiles @@ -33,23 +33,27 @@ import ( // Request holds the request body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/suggest_user_profiles/Request.ts#L24-L68 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/suggest_user_profiles/Request.ts#L24-L81 type Request struct { - // Data List of filters for the `data` field of the profile document. 
- // To return all content use `data=*`. To return a subset of content - // use `data=` to retrieve content nested under the specified ``. - // By default returns no `data` content. + // Data A comma-separated list of filters for the `data` field of the profile + // document. + // To return all content use `data=*`. + // To return a subset of content, use `data=` to retrieve content nested + // under the specified ``. + // By default, the API returns no `data` content. + // It is an error to specify `data` as both the query parameter and the request + // body field. Data []string `json:"data,omitempty"` // Hint Extra search criteria to improve relevance of the suggestion result. // Profiles matching the spcified hint are ranked higher in the response. - // Profiles not matching the hint don't exclude the profile from the response - // as long as the profile matches the `name` field query. + // Profiles not matching the hint aren't excluded from the response as long as + // the profile matches the `name` field query. Hint *types.Hint `json:"hint,omitempty"` - // Name Query string used to match name-related fields in user profile documents. + // Name A query string used to match name-related fields in user profile documents. // Name-related fields are the user's `username`, `full_name`, and `email`. Name *string `json:"name,omitempty"` - // Size Number of profiles to return. + // Size The number of profiles to return. Size *int64 `json:"size,omitempty"` } diff --git a/typedapi/security/suggestuserprofiles/response.go b/typedapi/security/suggestuserprofiles/response.go index 71fa8ff41c..a05efcf983 100644 --- a/typedapi/security/suggestuserprofiles/response.go +++ b/typedapi/security/suggestuserprofiles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package suggestuserprofiles @@ -26,11 +26,16 @@ import ( // Response holds the response body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/suggest_user_profiles/Response.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/suggest_user_profiles/Response.ts#L29-L44 type Response struct { - Profiles []types.UserProfile `json:"profiles"` - Took int64 `json:"took"` - Total types.TotalUserProfiles `json:"total"` + + // Profiles A list of profile documents, ordered by relevance, that match the search + // criteria. + Profiles []types.UserProfile `json:"profiles"` + // Took The number of milliseconds it took Elasticsearch to run the request. + Took int64 `json:"took"` + // Total Metadata about the number of matching profiles. + Total types.TotalUserProfiles `json:"total"` } // NewResponse returns a Response diff --git a/typedapi/security/suggestuserprofiles/suggest_user_profiles.go b/typedapi/security/suggestuserprofiles/suggest_user_profiles.go index f0c329c11e..acf2ca25a4 100644 --- a/typedapi/security/suggestuserprofiles/suggest_user_profiles.go +++ b/typedapi/security/suggestuserprofiles/suggest_user_profiles.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Suggest a user profile. // // Get suggestions for user profiles that match specified search criteria. 
+// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package suggestuserprofiles import ( @@ -79,7 +85,13 @@ func NewSuggestUserProfilesFunc(tp elastictransport.Interface) NewSuggestUserPro // // Get suggestions for user profiles that match specified search criteria. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-suggest-user-profile.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles func New(tp elastictransport.Interface) *SuggestUserProfiles { r := &SuggestUserProfiles{ transport: tp, @@ -87,8 +99,6 @@ func New(tp elastictransport.Interface) *SuggestUserProfiles { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,42 +352,63 @@ func (r *SuggestUserProfiles) Pretty(pretty bool) *SuggestUserProfiles { return r } -// Data List of filters for the `data` field of the profile document. -// To return all content use `data=*`. To return a subset of content -// use `data=` to retrieve content nested under the specified ``. -// By default returns no `data` content. +// A comma-separated list of filters for the `data` field of the profile +// document. +// To return all content use `data=*`. 
+// To return a subset of content, use `data=` to retrieve content nested +// under the specified ``. +// By default, the API returns no `data` content. +// It is an error to specify `data` as both the query parameter and the request +// body field. // API name: data func (r *SuggestUserProfiles) Data(data ...string) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Data = make([]string, len(data)) r.req.Data = data return r } -// Hint Extra search criteria to improve relevance of the suggestion result. +// Extra search criteria to improve relevance of the suggestion result. // Profiles matching the spcified hint are ranked higher in the response. -// Profiles not matching the hint don't exclude the profile from the response -// as long as the profile matches the `name` field query. +// Profiles not matching the hint aren't excluded from the response as long as +// the profile matches the `name` field query. // API name: hint -func (r *SuggestUserProfiles) Hint(hint *types.Hint) *SuggestUserProfiles { +func (r *SuggestUserProfiles) Hint(hint types.HintVariant) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Hint = hint + r.req.Hint = hint.HintCaster() return r } -// Name Query string used to match name-related fields in user profile documents. +// A query string used to match name-related fields in user profile documents. // Name-related fields are the user's `username`, `full_name`, and `email`. // API name: name func (r *SuggestUserProfiles) Name(name string) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name return r } -// Size Number of profiles to return. +// The number of profiles to return. 
// API name: size func (r *SuggestUserProfiles) Size(size int64) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size diff --git a/typedapi/security/updateapikey/request.go b/typedapi/security/updateapikey/request.go index 47e1e00c87..b2a65f3ad9 100644 --- a/typedapi/security/updateapikey/request.go +++ b/typedapi/security/updateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateapikey @@ -32,23 +32,32 @@ import ( // Request holds the request body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_api_key/Request.ts#L26-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_api_key/Request.ts#L26-L91 type Request struct { - // Expiration Expiration time for the API key. + // Expiration The expiration time for the API key. + // By default, API keys never expire. + // This property can be omitted to leave the expiration unchanged. Expiration types.Duration `json:"expiration,omitempty"` - // Metadata Arbitrary metadata that you want to associate with the API key. It supports - // nested data structure. Within the metadata object, keys beginning with _ are - // reserved for system usage. + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports a nested data structure. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. 
+ // When specified, this value fully replaces the metadata previously associated + // with the API key. Metadata types.Metadata `json:"metadata,omitempty"` - // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. - // When it is not specified or is an empty array, then the API key will have a - // point in time snapshot of permissions of the authenticated user. If you - // supply role descriptors then the resultant permissions would be an - // intersection of API keys permissions and authenticated user’s permissions - // thereby limiting the access scope for API keys. The structure of role - // descriptor is the same as the request for create role API. For more details, - // see create or update roles API. + // RoleDescriptors The role descriptors to assign to this API key. + // The API key's effective permissions are an intersection of its assigned + // privileges and the point in time snapshot of permissions of the owner user. + // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, you can supply an empty `role_descriptors` + // parameter, that is to say, an empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter or not. + // The structure of a role descriptor is the same as the request for the create + // API keys API. RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` } diff --git a/typedapi/security/updateapikey/response.go b/typedapi/security/updateapikey/response.go index c67241e907..bc15170bab 100644 --- a/typedapi/security/updateapikey/response.go +++ b/typedapi/security/updateapikey/response.go @@ -16,17 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateapikey // Response holds the response body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_api_key/Response.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_api_key/Response.ts#L20-L28 type Response struct { // Updated If `true`, the API key was updated. - // If `false`, the API key didn’t change because no change was detected. + // If `false`, the API key didn't change because no change was detected. Updated bool `json:"updated"` } diff --git a/typedapi/security/updateapikey/update_api_key.go b/typedapi/security/updateapikey/update_api_key.go index a60630e415..e7eb44c65e 100644 --- a/typedapi/security/updateapikey/update_api_key.go +++ b/typedapi/security/updateapikey/update_api_key.go @@ -16,34 +16,41 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update an API key. // -// Updates attributes of an existing API key. +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. // Users can only update API keys that they created or that were granted to // them. 
-// Use this API to update API keys created by the create API Key or grant API +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API // Key APIs. -// If you need to apply the same update to many API keys, you can use bulk -// update API Keys to reduce overhead. -// It’s not possible to update expired API keys, or API keys that have been -// invalidated by invalidate API Key. -// This API supports updates to an API key’s access scope and metadata. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. +// // The access scope of an API key is derived from the `role_descriptors` you -// specify in the request, and a snapshot of the owner user’s permissions at the +// specify in the request and a snapshot of the owner user's permissions at the // time of the request. -// The snapshot of the owner’s permissions is updated automatically on every +// The snapshot of the owner's permissions is updated automatically on every // call. -// If you don’t specify `role_descriptors` in the request, a call to this API -// might still change the API key’s access scope. -// This change can occur if the owner user’s permissions have changed since the +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the // API key was created or last modified. 
-// To update another user’s API key, use the `run_as` feature to submit a -// request on behalf of another user. -// IMPORTANT: It’s not possible to use an API key as the authentication -// credential for this API. -// To update an API key, the owner user’s credentials are required. package updateapikey import ( @@ -108,32 +115,39 @@ func NewUpdateApiKeyFunc(tp elastictransport.Interface) NewUpdateApiKey { // Update an API key. // -// Updates attributes of an existing API key. +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. // Users can only update API keys that they created or that were granted to // them. -// Use this API to update API keys created by the create API Key or grant API +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API // Key APIs. -// If you need to apply the same update to many API keys, you can use bulk -// update API Keys to reduce overhead. -// It’s not possible to update expired API keys, or API keys that have been -// invalidated by invalidate API Key. -// This API supports updates to an API key’s access scope and metadata. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. 
+// // The access scope of an API key is derived from the `role_descriptors` you -// specify in the request, and a snapshot of the owner user’s permissions at the +// specify in the request and a snapshot of the owner user's permissions at the // time of the request. -// The snapshot of the owner’s permissions is updated automatically on every +// The snapshot of the owner's permissions is updated automatically on every // call. -// If you don’t specify `role_descriptors` in the request, a call to this API -// might still change the API key’s access scope. -// This change can occur if the owner user’s permissions have changed since the +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the // API key was created or last modified. -// To update another user’s API key, use the `run_as` feature to submit a -// request on behalf of another user. -// IMPORTANT: It’s not possible to use an API key as the authentication -// credential for this API. -// To update an API key, the owner user’s credentials are required. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key func New(tp elastictransport.Interface) *UpdateApiKey { r := &UpdateApiKey{ transport: tp, @@ -141,8 +155,6 @@ func New(tp elastictransport.Interface) *UpdateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -409,36 +421,76 @@ func (r *UpdateApiKey) Pretty(pretty bool) *UpdateApiKey { return r } -// Expiration Expiration time for the API key. +// The expiration time for the API key. +// By default, API keys never expire. 
+// This property can be omitted to leave the expiration unchanged. // API name: expiration -func (r *UpdateApiKey) Expiration(duration types.Duration) *UpdateApiKey { - r.req.Expiration = duration +func (r *UpdateApiKey) Expiration(duration types.DurationVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. It supports -// nested data structure. Within the metadata object, keys beginning with _ are -// reserved for system usage. +// Arbitrary metadata that you want to associate with the API key. +// It supports a nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// When specified, this value fully replaces the metadata previously associated +// with the API key. // API name: metadata -func (r *UpdateApiKey) Metadata(metadata types.Metadata) *UpdateApiKey { - r.req.Metadata = metadata +func (r *UpdateApiKey) Metadata(metadata types.MetadataVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. -// When it is not specified or is an empty array, then the API key will have a -// point in time snapshot of permissions of the authenticated user. If you -// supply role descriptors then the resultant permissions would be an -// intersection of API keys permissions and authenticated user’s permissions -// thereby limiting the access scope for API keys. The structure of role -// descriptor is the same as the request for create role API. For more details, -// see create or update roles API. +// The role descriptors to assign to this API key. 
+// The API key's effective permissions are an intersection of its assigned +// privileges and the point in time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, you can supply an empty `role_descriptors` +// parameter, that is to say, an empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter or not. +// The structure of a role descriptor is the same as the request for the create +// API keys API. // API name: role_descriptors func (r *UpdateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *UpdateApiKey { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *UpdateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.RoleDescriptors = tmp return r } diff --git a/typedapi/security/updatecrossclusterapikey/request.go b/typedapi/security/updatecrossclusterapikey/request.go index f517772810..cb0b679b2b 100644 --- a/typedapi/security/updatecrossclusterapikey/request.go +++ b/typedapi/security/updatecrossclusterapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatecrossclusterapikey @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package updatecrossclusterapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L83 type Request struct { // Access The access to be granted to this API key. @@ -42,7 +42,7 @@ type Request struct { // When specified, the new access assignment fully replaces the previously // assigned access. Access types.Access `json:"access"` - // Expiration Expiration time for the API key. + // Expiration The expiration time for the API key. // By default, API keys never expire. This property can be omitted to leave the // value unchanged. Expiration types.Duration `json:"expiration,omitempty"` diff --git a/typedapi/security/updatecrossclusterapikey/response.go b/typedapi/security/updatecrossclusterapikey/response.go index 039d5eae3f..f794d02798 100644 --- a/typedapi/security/updatecrossclusterapikey/response.go +++ b/typedapi/security/updatecrossclusterapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatecrossclusterapikey // Response holds the response body struct for the package updatecrossclusterapikey // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyResponse.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyResponse.ts#L20-L28 type Response struct { // Updated If `true`, the API key was updated. diff --git a/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go b/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go index 0ff3664ebf..0c638aa6fb 100644 --- a/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go +++ b/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go @@ -16,12 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a cross-cluster API key. // // Update the attributes of an existing cross-cluster API key, which is used for // API key based remote cluster access. +// +// To use this API, you must have at least the `manage_security` cluster +// privilege. +// Users can only update API keys that they created. +// To update another user's API key, use the `run_as` feature to submit a +// request on behalf of another user. 
+// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. +// To update an API key, the owner user's credentials are required. +// +// It's not possible to update expired API keys, or API keys that have been +// invalidated by the invalidate API key API. +// +// This API supports updates to an API key's access scope, metadata, and +// expiration. +// The owner user's information, such as the `username` and `realm`, is also +// updated automatically on every call. +// +// NOTE: This API cannot update REST API keys, which should be updated by either +// the update API key or bulk update API keys API. package updatecrossclusterapikey import ( @@ -89,7 +110,28 @@ func NewUpdateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewUpdateCro // Update the attributes of an existing cross-cluster API key, which is used for // API key based remote cluster access. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html +// To use this API, you must have at least the `manage_security` cluster +// privilege. +// Users can only update API keys that they created. +// To update another user's API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. +// To update an API key, the owner user's credentials are required. +// +// It's not possible to update expired API keys, or API keys that have been +// invalidated by the invalidate API key API. +// +// This API supports updates to an API key's access scope, metadata, and +// expiration. +// The owner user's information, such as the `username` and `realm`, is also +// updated automatically on every call. +// +// NOTE: This API cannot update REST API keys, which should be updated by either +// the update API key or bulk update API keys API. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key func New(tp elastictransport.Interface) *UpdateCrossClusterApiKey { r := &UpdateCrossClusterApiKey{ transport: tp, @@ -97,8 +139,6 @@ func New(tp elastictransport.Interface) *UpdateCrossClusterApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,39 +407,53 @@ func (r *UpdateCrossClusterApiKey) Pretty(pretty bool) *UpdateCrossClusterApiKey return r } -// Access The access to be granted to this API key. +// The access to be granted to this API key. // The access is composed of permissions for cross cluster search and cross // cluster replication. // At least one of them must be specified. // When specified, the new access assignment fully replaces the previously // assigned access. // API name: access -func (r *UpdateCrossClusterApiKey) Access(access *types.Access) *UpdateCrossClusterApiKey { +func (r *UpdateCrossClusterApiKey) Access(access types.AccessVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Access = *access + r.req.Access = *access.AccessCaster() return r } -// Expiration Expiration time for the API key. +// The expiration time for the API key. // By default, API keys never expire. This property can be omitted to leave the // value unchanged. 
// API name: expiration -func (r *UpdateCrossClusterApiKey) Expiration(duration types.Duration) *UpdateCrossClusterApiKey { - r.req.Expiration = duration +func (r *UpdateCrossClusterApiKey) Expiration(duration types.DurationVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. +// Arbitrary metadata that you want to associate with the API key. // It supports nested data structure. // Within the metadata object, keys beginning with `_` are reserved for system // usage. // When specified, this information fully replaces metadata previously // associated with the API key. // API name: metadata -func (r *UpdateCrossClusterApiKey) Metadata(metadata types.Metadata) *UpdateCrossClusterApiKey { - r.req.Metadata = metadata +func (r *UpdateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } diff --git a/typedapi/security/updatesettings/request.go b/typedapi/security/updatesettings/request.go new file mode 100644 index 0000000000..8d3542c05b --- /dev/null +++ b/typedapi/security/updatesettings/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatesettings + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_settings/SecurityUpdateSettingsRequest.ts#L24-L71 +type Request struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security *types.SecuritySettings `json:"security,omitempty"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile *types.SecuritySettings `json:"security-profile,omitempty"` + // SecurityTokens Settings for the index used to store tokens. 
+ SecurityTokens *types.SecuritySettings `json:"security-tokens,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/updatesettings/response.go b/typedapi/security/updatesettings/response.go new file mode 100644 index 0000000000..1609427a0c --- /dev/null +++ b/typedapi/security/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_settings/SecurityUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/updatesettings/update_settings.go b/typedapi/security/updatesettings/update_settings.go index 78dc2cf887..c3a7629160 100644 --- a/typedapi/security/updatesettings/update_settings.go +++ b/typedapi/security/updatesettings/update_settings.go @@ -16,21 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Update settings for the security system index +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. 
package updatesettings import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +62,10 @@ type UpdateSettings struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,14 +86,29 @@ func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { } } -// Update settings for the security system index +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings func New(tp elastictransport.Interface) *UpdateSettings { r := &UpdateSettings{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +120,21 @@ func New(tp elastictransport.Interface) *UpdateSettings { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +144,31 @@ func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +256,7 @@ func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, er } // Do runs the request through the transport, handle the response and returns a updatesettings.Response -func (r UpdateSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +267,46 @@ func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the UpdateSettings query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the UpdateSettings headers map. 
@@ -229,3 +315,107 @@ func (r *UpdateSettings) Header(key, value string) *UpdateSettings { return r } + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *UpdateSettings) Timeout(duration string) *UpdateSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateSettings) Human(human bool) *UpdateSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Settings for the index used for most security configuration, including native +// realm users and roles configured with the API. +// API name: security +func (r *UpdateSettings) Security(security types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Security = security.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store profile information. +// API name: security-profile +func (r *UpdateSettings) SecurityProfile(securityprofile types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityProfile = securityprofile.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store tokens. +// API name: security-tokens +func (r *UpdateSettings) SecurityTokens(securitytokens types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityTokens = securitytokens.SecuritySettingsCaster() + + return r +} diff --git a/typedapi/security/updateuserprofiledata/request.go b/typedapi/security/updateuserprofiledata/request.go index 186fc855a9..b5fc75b4db 100644 --- a/typedapi/security/updateuserprofiledata/request.go +++ b/typedapi/security/updateuserprofiledata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateuserprofiledata @@ -27,14 +27,20 @@ import ( // Request holds the request body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_user_profile_data/Request.ts#L27-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_user_profile_data/Request.ts#L27-L98 type Request struct { // Data Non-searchable data that you want to associate with the user profile. // This field supports a nested data structure. + // Within the `data` object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). + // The data object is not searchable, but can be retrieved with the get user + // profile API. Data map[string]json.RawMessage `json:"data,omitempty"` - // Labels Searchable data that you want to associate with the user profile. This - // field supports a nested data structure. + // Labels Searchable data that you want to associate with the user profile. + // This field supports a nested data structure. + // Within the labels object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). Labels map[string]json.RawMessage `json:"labels,omitempty"` } diff --git a/typedapi/security/updateuserprofiledata/response.go b/typedapi/security/updateuserprofiledata/response.go index bea80256be..09ac87e27a 100644 --- a/typedapi/security/updateuserprofiledata/response.go +++ b/typedapi/security/updateuserprofiledata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updateuserprofiledata // Response holds the response body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/update_user_profile_data/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/update_user_profile_data/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/updateuserprofiledata/update_user_profile_data.go b/typedapi/security/updateuserprofiledata/update_user_profile_data.go index ac0f305d06..0cb974a1e2 100644 --- a/typedapi/security/updateuserprofiledata/update_user_profile_data.go +++ b/typedapi/security/updateuserprofiledata/update_user_profile_data.go @@ -16,12 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update user profile data. // // Update specific data for the user profile that is associated with a unique // ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
+// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. package updateuserprofiledata import ( @@ -90,7 +111,28 @@ func NewUpdateUserProfileDataFunc(tp elastictransport.Interface) NewUpdateUserPr // Update specific data for the user profile that is associated with a unique // ID. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. 
+// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data func New(tp elastictransport.Interface) *UpdateUserProfileData { r := &UpdateUserProfileData{ transport: tp, @@ -98,8 +140,6 @@ func New(tp elastictransport.Interface) *UpdateUserProfileData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -341,9 +381,10 @@ func (r *UpdateUserProfileData) IfPrimaryTerm(ifprimaryterm string) *UpdateUserP } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. // API name: refresh func (r *UpdateUserProfileData) Refresh(refresh refresh.Refresh) *UpdateUserProfileData { r.values.Set("refresh", refresh.String()) @@ -395,22 +436,70 @@ func (r *UpdateUserProfileData) Pretty(pretty bool) *UpdateUserProfileData { return r } -// Data Non-searchable data that you want to associate with the user profile. +// Non-searchable data that you want to associate with the user profile. // This field supports a nested data structure. +// Within the `data` object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). +// The data object is not searchable, but can be retrieved with the get user +// profile API. 
// API name: data func (r *UpdateUserProfileData) Data(data map[string]json.RawMessage) *UpdateUserProfileData { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Data = data + return r +} + +func (r *UpdateUserProfileData) AddDatum(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Data == nil { + r.req.Data = make(map[string]json.RawMessage) + } else { + tmp = r.req.Data + } + tmp[key] = value + + r.req.Data = tmp return r } -// Labels Searchable data that you want to associate with the user profile. This -// field supports a nested data structure. +// Searchable data that you want to associate with the user profile. +// This field supports a nested data structure. +// Within the labels object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). // API name: labels func (r *UpdateUserProfileData) Labels(labels map[string]json.RawMessage) *UpdateUserProfileData { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Labels = labels + return r +} + +func (r *UpdateUserProfileData) AddLabel(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Labels == nil { + r.req.Labels = make(map[string]json.RawMessage) + } else { + tmp = r.req.Labels + } + + tmp[key] = value + r.req.Labels = tmp return r } diff --git a/typedapi/shutdown/deletenode/delete_node.go b/typedapi/shutdown/deletenode/delete_node.go index f3949ca6df..02b7e26368 100644 --- a/typedapi/shutdown/deletenode/delete_node.go +++ b/typedapi/shutdown/deletenode/delete_node.go @@ -16,10 +16,20 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Removes a node from the shutdown list. Designed for indirect use by ECE/ESS -// and ECK. Direct use is not supported. +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. package deletenode import ( @@ -78,10 +88,20 @@ func NewDeleteNodeFunc(tp elastictransport.Interface) NewDeleteNode { } } -// Removes a node from the shutdown list. Designed for indirect use by ECE/ESS -// and ECK. Direct use is not supported. +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node func New(tp elastictransport.Interface) *DeleteNode { r := &DeleteNode{ transport: tp, diff --git a/typedapi/shutdown/deletenode/response.go b/typedapi/shutdown/deletenode/response.go index 42f3e89c60..f53cefebb3 100644 --- a/typedapi/shutdown/deletenode/response.go +++ b/typedapi/shutdown/deletenode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletenode // Response holds the response body struct for the package deletenode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/shutdown/getnode/get_node.go b/typedapi/shutdown/getnode/get_node.go index 4a26c28b72..4d2c2dfafd 100644 --- a/typedapi/shutdown/getnode/get_node.go +++ b/typedapi/shutdown/getnode/get_node.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieve status of a node or nodes that are currently marked as shutting -// down. 
Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the shutdown status. +// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. package getnode import ( @@ -77,11 +86,20 @@ func NewGetNodeFunc(tp elastictransport.Interface) NewGetNode { } } -// Retrieve status of a node or nodes that are currently marked as shutting -// down. Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the shutdown status. +// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node func New(tp elastictransport.Interface) *GetNode { r := &GetNode{ transport: tp, @@ -324,15 +342,6 @@ func (r *GetNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *GetNode { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. 
-// API name: timeout -func (r *GetNode) Timeout(timeout timeunit.TimeUnit) *GetNode { - r.values.Set("timeout", timeout.String()) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/shutdown/getnode/response.go b/typedapi/shutdown/getnode/response.go index 023534b812..a2f60eb3db 100644 --- a/typedapi/shutdown/getnode/response.go +++ b/typedapi/shutdown/getnode/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getnode @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getnode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 type Response struct { Nodes []types.NodeShutdownStatus `json:"nodes"` } diff --git a/typedapi/shutdown/putnode/put_node.go b/typedapi/shutdown/putnode/put_node.go index 7981326bf2..e980a24d32 100644 --- a/typedapi/shutdown/putnode/put_node.go +++ b/typedapi/shutdown/putnode/put_node.go @@ -16,10 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. 
+// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. +// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. +// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. package putnode import ( @@ -84,10 +106,32 @@ func NewPutNodeFunc(tp elastictransport.Interface) NewPutNode { } } -// Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. +// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. 
+// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. +// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node func New(tp elastictransport.Interface) *PutNode { r := &PutNode{ transport: tp, @@ -95,8 +139,6 @@ func New(tp elastictransport.Interface) *PutNode { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,7 +352,10 @@ func (r *PutNode) Header(key, value string) *PutNode { return r } -// NodeId The node id of node to be shut down +// NodeId The node identifier. +// This parameter is not validated against the cluster's active nodes. +// This enables you to register a node for shut down while it is offline. +// No error is thrown if you specify an invalid node ID. // API Name: nodeid func (r *PutNode) _nodeid(nodeid string) *PutNode { r.paramSet |= nodeidMask @@ -319,8 +364,9 @@ func (r *PutNode) _nodeid(nodeid string) *PutNode { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { r.values.Set("master_timeout", mastertimeout.String()) @@ -328,8 +374,9 @@ func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout func (r *PutNode) Timeout(timeout timeunit.TimeUnit) *PutNode { r.values.Set("timeout", timeout.String()) @@ -381,7 +428,7 @@ func (r *PutNode) Pretty(pretty bool) *PutNode { return r } -// AllocationDelay Only valid if type is restart. +// Only valid if type is restart. // Controls how long Elasticsearch will wait for the node to restart and join // the cluster before reassigning its shards to other nodes. // This works the same as delaying allocation with the @@ -390,24 +437,32 @@ func (r *PutNode) Pretty(pretty bool) *PutNode { // delay, the longer of the two is used. // API name: allocation_delay func (r *PutNode) AllocationDelay(allocationdelay string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AllocationDelay = &allocationdelay return r } -// Reason A human-readable reason that the node is being shut down. +// A human-readable reason that the node is being shut down. // This field provides information for other cluster operators; it does not // affect the shut down process. // API name: reason func (r *PutNode) Reason(reason string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Reason = reason return r } -// TargetNodeName Only valid if type is replace. +// Only valid if type is replace. 
// Specifies the name of the node that is replacing the node being shut down. // Shards from the shut down node are only allowed to be allocated to the target // node, and no other data will be allocated to the target node. @@ -415,13 +470,17 @@ func (r *PutNode) Reason(reason string) *PutNode { // watermarks or user attribute filtering rules. // API name: target_node_name func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TargetNodeName = &targetnodename return r } -// Type Valid values are restart, remove, or replace. +// Valid values are restart, remove, or replace. // Use restart when you need to temporarily shut down a node to perform an // upgrade, make configuration changes, or perform other maintenance. // Because the node is expected to rejoin the cluster, data is not migrated off @@ -435,7 +494,10 @@ func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { // unassigned shards, and shrink may fail until the replacement is complete. // API name: type func (r *PutNode) Type(type_ type_.Type) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Type = type_ - return r } diff --git a/typedapi/shutdown/putnode/request.go b/typedapi/shutdown/putnode/request.go index 42ce153999..24655840a2 100644 --- a/typedapi/shutdown/putnode/request.go +++ b/typedapi/shutdown/putnode/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putnode @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L108 type Request struct { // AllocationDelay Only valid if type is restart. diff --git a/typedapi/shutdown/putnode/response.go b/typedapi/shutdown/putnode/response.go index 3e0797bc43..b9eb272bbe 100644 --- a/typedapi/shutdown/putnode/response.go +++ b/typedapi/shutdown/putnode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putnode // Response holds the response body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/simulate/ingest/ingest.go b/typedapi/simulate/ingest/ingest.go new file mode 100644 index 0000000000..d9ec49ac00 --- /dev/null +++ b/typedapi/simulate/ingest/ingest.go @@ -0,0 +1,564 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Simulate data ingestion. +// Run ingest pipelines against a set of provided documents, optionally with +// substitute pipeline definitions, to simulate ingesting data into an index. +// +// This API is meant to be used for troubleshooting or pipeline development, as +// it does not actually index any data into Elasticsearch. +// +// The API runs the default and final pipeline for that index against a set of +// documents provided in the body of the request. +// If a pipeline contains a reroute processor, it follows that reroute processor +// to the new index, running that index's pipelines as well the same way that a +// non-simulated ingest would. +// No data is indexed into Elasticsearch. 
+// Instead, the transformed document is returned, along with the list of +// pipelines that have been run and the name of the index where the document +// would have been indexed if this were not a simulation. +// The transformed document is validated against the mappings that would apply +// to this index, and any validation error is reported in the result. +// +// This API differs from the simulate pipeline API in that you specify a single +// pipeline for that API, and it runs only that one pipeline. +// The simulate pipeline API is more useful for developing a single pipeline, +// while the simulate ingest API is more useful for troubleshooting the +// interaction of the various pipelines that get applied when ingesting into an +// index. +// +// By default, the pipeline definitions that are currently in the system are +// used. +// However, you can supply substitute pipeline definitions in the body of the +// request. +// These will be used in place of the pipeline definitions that are already in +// the system. This can be used to replace existing pipeline definitions or to +// create new ones. The pipeline substitutions are used only within this +// request. +package ingest + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Ingest struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewIngest type alias for index. +type NewIngest func() *Ingest + +// NewIngestFunc returns a new instance of Ingest with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewIngestFunc(tp elastictransport.Interface) NewIngest { + return func() *Ingest { + n := New(tp) + + return n + } +} + +// Simulate data ingestion. +// Run ingest pipelines against a set of provided documents, optionally with +// substitute pipeline definitions, to simulate ingesting data into an index. +// +// This API is meant to be used for troubleshooting or pipeline development, as +// it does not actually index any data into Elasticsearch. +// +// The API runs the default and final pipeline for that index against a set of +// documents provided in the body of the request. +// If a pipeline contains a reroute processor, it follows that reroute processor +// to the new index, running that index's pipelines as well the same way that a +// non-simulated ingest would. +// No data is indexed into Elasticsearch. +// Instead, the transformed document is returned, along with the list of +// pipelines that have been run and the name of the index where the document +// would have been indexed if this were not a simulation. +// The transformed document is validated against the mappings that would apply +// to this index, and any validation error is reported in the result. +// +// This API differs from the simulate pipeline API in that you specify a single +// pipeline for that API, and it runs only that one pipeline. 
+// The simulate pipeline API is more useful for developing a single pipeline, +// while the simulate ingest API is more useful for troubleshooting the +// interaction of the various pipelines that get applied when ingesting into an +// index. +// +// By default, the pipeline definitions that are currently in the system are +// used. +// However, you can supply substitute pipeline definitions in the body of the +// request. +// These will be used in place of the pipeline definitions that are already in +// the system. This can be used to replace existing pipeline definitions or to +// create new ones. The pipeline substitutions are used only within this +// request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest +func New(tp elastictransport.Interface) *Ingest { + r := &Ingest{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Ingest) Raw(raw io.Reader) *Ingest { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Ingest) Request(req *Request) *Ingest { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Ingest) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Ingest: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Ingest) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "simulate.ingest") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "simulate.ingest") + if reader := instrument.RecordRequestBody(ctx, "simulate.ingest", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "simulate.ingest") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Ingest query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a ingest.Response +func (r Ingest) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "simulate.ingest") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Ingest headers map. +func (r *Ingest) Header(key, value string) *Ingest { + r.headers.Set(key, value) + + return r +} + +// Index The index to simulate ingesting into. +// This value can be overridden by specifying an index on each document. +// If you specify this parameter in the request path, it is used for any +// documents that do not explicitly specify an index argument. +// API Name: index +func (r *Ingest) Index(index string) *Ingest { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Pipeline The pipeline to use as the default pipeline. +// This value can be used to override the default pipeline of the index. +// API name: pipeline +func (r *Ingest) Pipeline(pipelinename string) *Ingest { + r.values.Set("pipeline", pipelinename) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Ingest) ErrorTrace(errortrace bool) *Ingest { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path
+func (r *Ingest) FilterPath(filterpaths ...string) *Ingest {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Ingest) Human(human bool) *Ingest {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Ingest) Pretty(pretty bool) *Ingest {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// A map of component template names to substitute component template definition
+// objects.
+// API name: component_template_substitutions +func (r *Ingest) ComponentTemplateSubstitutions(componenttemplatesubstitutions map[string]types.ComponentTemplateNode) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ComponentTemplateSubstitutions = componenttemplatesubstitutions + return r +} + +func (r *Ingest) AddComponentTemplateSubstitution(key string, value types.ComponentTemplateNodeVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ComponentTemplateNode + if r.req.ComponentTemplateSubstitutions == nil { + r.req.ComponentTemplateSubstitutions = make(map[string]types.ComponentTemplateNode) + } else { + tmp = r.req.ComponentTemplateSubstitutions + } + + tmp[key] = *value.ComponentTemplateNodeCaster() + + r.req.ComponentTemplateSubstitutions = tmp + return r +} + +// Sample documents to test in the pipeline. +// API name: docs +func (r *Ingest) Docs(docs ...types.DocumentVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.DocumentCaster()) + + } + return r +} + +// A map of index template names to substitute index template definition +// objects. 
+// API name: index_template_substitutions +func (r *Ingest) IndexTemplateSubstitutions(indextemplatesubstitutions map[string]types.IndexTemplate) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexTemplateSubstitutions = indextemplatesubstitutions + return r +} + +func (r *Ingest) AddIndexTemplateSubstitution(key string, value types.IndexTemplateVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.IndexTemplate + if r.req.IndexTemplateSubstitutions == nil { + r.req.IndexTemplateSubstitutions = make(map[string]types.IndexTemplate) + } else { + tmp = r.req.IndexTemplateSubstitutions + } + + tmp[key] = *value.IndexTemplateCaster() + + r.req.IndexTemplateSubstitutions = tmp + return r +} + +// API name: mapping_addition +func (r *Ingest) MappingAddition(mappingaddition types.TypeMappingVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MappingAddition = mappingaddition.TypeMappingCaster() + + return r +} + +// Pipelines to test. +// If you don’t specify the `pipeline` request path parameter, this parameter is +// required. +// If you specify both this and the request path parameter, the API only uses +// the request path parameter. 
+// API name: pipeline_substitutions +func (r *Ingest) PipelineSubstitutions(pipelinesubstitutions map[string]types.IngestPipeline) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PipelineSubstitutions = pipelinesubstitutions + return r +} + +func (r *Ingest) AddPipelineSubstitution(key string, value types.IngestPipelineVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.IngestPipeline + if r.req.PipelineSubstitutions == nil { + r.req.PipelineSubstitutions = make(map[string]types.IngestPipeline) + } else { + tmp = r.req.PipelineSubstitutions + } + + tmp[key] = *value.IngestPipelineCaster() + + r.req.PipelineSubstitutions = tmp + return r +} diff --git a/typedapi/simulate/ingest/request.go b/typedapi/simulate/ingest/request.go new file mode 100644 index 0000000000..bee39c4900 --- /dev/null +++ b/typedapi/simulate/ingest/request.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package ingest + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package ingest +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/simulate/ingest/SimulateIngestRequest.ts#L29-L100 +type Request struct { + + // ComponentTemplateSubstitutions A map of component template names to substitute component template definition + // objects. + ComponentTemplateSubstitutions map[string]types.ComponentTemplateNode `json:"component_template_substitutions,omitempty"` + // Docs Sample documents to test in the pipeline. + Docs []types.Document `json:"docs"` + // IndexTemplateSubstitutions A map of index template names to substitute index template definition + // objects. + IndexTemplateSubstitutions map[string]types.IndexTemplate `json:"index_template_substitutions,omitempty"` + MappingAddition *types.TypeMapping `json:"mapping_addition,omitempty"` + // PipelineSubstitutions Pipelines to test. + // If you don’t specify the `pipeline` request path parameter, this parameter is + // required. + // If you specify both this and the request path parameter, the API only uses + // the request path parameter. 
+ PipelineSubstitutions map[string]types.IngestPipeline `json:"pipeline_substitutions,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + ComponentTemplateSubstitutions: make(map[string]types.ComponentTemplateNode, 0), + IndexTemplateSubstitutions: make(map[string]types.IndexTemplate, 0), + PipelineSubstitutions: make(map[string]types.IngestPipeline, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Ingest request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/simulate/ingest/response.go b/typedapi/simulate/ingest/response.go new file mode 100644 index 0000000000..c91e9a0b7b --- /dev/null +++ b/typedapi/simulate/ingest/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package ingest + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package ingest +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/simulate/ingest/SimulateIngestResponse.ts#L27-L29 +type Response struct { + Docs []types.SimulateIngestDocumentResult `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/slm/deletelifecycle/delete_lifecycle.go b/typedapi/slm/deletelifecycle/delete_lifecycle.go index 11f787cb76..06fad419ca 100644 --- a/typedapi/slm/deletelifecycle/delete_lifecycle.go +++ b/typedapi/slm/deletelifecycle/delete_lifecycle.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes an existing snapshot lifecycle policy. +// Delete a policy. +// Delete a snapshot lifecycle policy definition. +// This operation prevents any future snapshots from being taken but does not +// cancel in-progress snapshots or remove previously-taken snapshots. package deletelifecycle import ( @@ -76,9 +79,12 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { } } -// Deletes an existing snapshot lifecycle policy. +// Delete a policy. +// Delete a snapshot lifecycle policy definition. +// This operation prevents any future snapshots from being taken but does not +// cancel in-progress snapshots or remove previously-taken snapshots. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle func New(tp elastictransport.Interface) *DeleteLifecycle { r := &DeleteLifecycle{ transport: tp, @@ -299,6 +305,26 @@ func (r *DeleteLifecycle) _policyid(policyid string) *DeleteLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteLifecycle) MasterTimeout(duration string) *DeleteLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeleteLifecycle) Timeout(duration string) *DeleteLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/deletelifecycle/response.go b/typedapi/slm/deletelifecycle/response.go index 6afcfc5de7..7a11e3ad99 100644 --- a/typedapi/slm/deletelifecycle/response.go +++ b/typedapi/slm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/executelifecycle/execute_lifecycle.go b/typedapi/slm/executelifecycle/execute_lifecycle.go index 0674f568e3..c6d46f0ced 100644 --- a/typedapi/slm/executelifecycle/execute_lifecycle.go +++ b/typedapi/slm/executelifecycle/execute_lifecycle.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Immediately creates a snapshot according to the lifecycle policy, without -// waiting for the scheduled time. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Run a policy. +// Immediately create a snapshot according to the snapshot lifecycle policy +// without waiting for the scheduled time. +// The snapshot policy is normally applied according to its schedule, but you +// might want to manually run a policy before performing an upgrade or other +// maintenance. 
package executelifecycle import ( @@ -77,10 +81,14 @@ func NewExecuteLifecycleFunc(tp elastictransport.Interface) NewExecuteLifecycle } } -// Immediately creates a snapshot according to the lifecycle policy, without -// waiting for the scheduled time. +// Run a policy. +// Immediately create a snapshot according to the snapshot lifecycle policy +// without waiting for the scheduled time. +// The snapshot policy is normally applied according to its schedule, but you +// might want to manually run a policy before performing an upgrade or other +// maintenance. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle func New(tp elastictransport.Interface) *ExecuteLifecycle { r := &ExecuteLifecycle{ transport: tp, @@ -303,6 +311,26 @@ func (r *ExecuteLifecycle) _policyid(policyid string) *ExecuteLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExecuteLifecycle) MasterTimeout(duration string) *ExecuteLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *ExecuteLifecycle) Timeout(duration string) *ExecuteLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/slm/executelifecycle/response.go b/typedapi/slm/executelifecycle/response.go index d52aac5bc4..9c568089f8 100644 --- a/typedapi/slm/executelifecycle/response.go +++ b/typedapi/slm/executelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package executelifecycle // Response holds the response body struct for the package executelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { SnapshotName string `json:"snapshot_name"` } diff --git a/typedapi/slm/executeretention/execute_retention.go b/typedapi/slm/executeretention/execute_retention.go index aee2adf647..bc1495eaac 100644 --- a/typedapi/slm/executeretention/execute_retention.go +++ b/typedapi/slm/executeretention/execute_retention.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes any snapshots that are expired according to the policy's retention -// rules. +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. 
+// The retention policy is normally applied according to its schedule. package executeretention import ( @@ -69,10 +71,12 @@ func NewExecuteRetentionFunc(tp elastictransport.Interface) NewExecuteRetention } } -// Deletes any snapshots that are expired according to the policy's retention -// rules. +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention func New(tp elastictransport.Interface) *ExecuteRetention { r := &ExecuteRetention{ transport: tp, @@ -278,6 +282,26 @@ func (r *ExecuteRetention) Header(key, value string) *ExecuteRetention { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExecuteRetention) MasterTimeout(duration string) *ExecuteRetention { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *ExecuteRetention) Timeout(duration string) *ExecuteRetention { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/slm/executeretention/response.go b/typedapi/slm/executeretention/response.go index 1e2a867ce7..78b33a08fc 100644 --- a/typedapi/slm/executeretention/response.go +++ b/typedapi/slm/executeretention/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package executeretention // Response holds the response body struct for the package executeretention // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/getlifecycle/get_lifecycle.go b/typedapi/slm/getlifecycle/get_lifecycle.go index 8c443ee006..e5fd697537 100644 --- a/typedapi/slm/getlifecycle/get_lifecycle.go +++ b/typedapi/slm/getlifecycle/get_lifecycle.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves one or more snapshot lifecycle policy definitions and information -// about the latest snapshot attempts. +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. 
package getlifecycle import ( @@ -75,10 +76,11 @@ func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { } } -// Retrieves one or more snapshot lifecycle policy definitions and information -// about the latest snapshot attempts. +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle func New(tp elastictransport.Interface) *GetLifecycle { r := &GetLifecycle{ transport: tp, @@ -306,6 +308,26 @@ func (r *GetLifecycle) PolicyId(policyid string) *GetLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetLifecycle) MasterTimeout(duration string) *GetLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *GetLifecycle) Timeout(duration string) *GetLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getlifecycle/response.go b/typedapi/slm/getlifecycle/response.go index 97cc03e480..ca00fa33e2 100644 --- a/typedapi/slm/getlifecycle/response.go +++ b/typedapi/slm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27 type Response map[string]types.SnapshotLifecycle diff --git a/typedapi/slm/getstats/get_stats.go b/typedapi/slm/getstats/get_stats.go index 2d70d1cf2e..265e425ebb 100644 --- a/typedapi/slm/getstats/get_stats.go +++ b/typedapi/slm/getstats/get_stats.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns global and policy-level statistics about actions taken by snapshot +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot // lifecycle management. package getstats @@ -69,10 +70,11 @@ func NewGetStatsFunc(tp elastictransport.Interface) NewGetStats { } } -// Returns global and policy-level statistics about actions taken by snapshot +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot // lifecycle management. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats func New(tp elastictransport.Interface) *GetStats { r := &GetStats{ transport: tp, @@ -278,6 +280,24 @@ func (r *GetStats) Header(key, value string) *GetStats { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetStats) MasterTimeout(duration string) *GetStats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *GetStats) Timeout(duration string) *GetStats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getstats/response.go b/typedapi/slm/getstats/response.go index 57e6b2515a..d79bd6fd66 100644 --- a/typedapi/slm/getstats/response.go +++ b/typedapi/slm/getstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 type Response struct { PolicyStats []string `json:"policy_stats"` RetentionDeletionTime types.Duration `json:"retention_deletion_time"` diff --git a/typedapi/slm/getstatus/get_status.go b/typedapi/slm/getstatus/get_status.go index e574831bca..c6ab476665 100644 --- a/typedapi/slm/getstatus/get_status.go +++ b/typedapi/slm/getstatus/get_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves the status of snapshot lifecycle management (SLM). +// Get the snapshot lifecycle management status. package getstatus import ( @@ -68,9 +68,9 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Retrieves the status of snapshot lifecycle management (SLM). +// Get the snapshot lifecycle management status. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status func New(tp elastictransport.Interface) *GetStatus { r := &GetStatus{ transport: tp, @@ -276,6 +276,28 @@ func (r *GetStatus) Header(key, value string) *GetStatus { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *GetStatus) MasterTimeout(duration string) *GetStatus { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *GetStatus) Timeout(duration string) *GetStatus { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getstatus/response.go b/typedapi/slm/getstatus/response.go index 7c9b9dc65f..76459bfe5e 100644 --- a/typedapi/slm/getstatus/response.go +++ b/typedapi/slm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` } diff --git a/typedapi/slm/putlifecycle/put_lifecycle.go b/typedapi/slm/putlifecycle/put_lifecycle.go index bf3cc417d9..6f3ad43ecb 100644 --- a/typedapi/slm/putlifecycle/put_lifecycle.go +++ b/typedapi/slm/putlifecycle/put_lifecycle.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a snapshot lifecycle policy. +// Create or update a policy. +// Create or update a snapshot lifecycle policy. +// If the policy already exists, this request increments the policy version. +// Only the latest version of a policy is stored. package putlifecycle import ( @@ -81,9 +84,12 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { } } -// Creates or updates a snapshot lifecycle policy. +// Create or update a policy. +// Create or update a snapshot lifecycle policy. +// If the policy already exists, this request increments the policy version. 
+// Only the latest version of a policy is stored. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle func New(tp elastictransport.Interface) *PutLifecycle { r := &PutLifecycle{ transport: tp, @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *PutLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +310,8 @@ func (r *PutLifecycle) Header(key, value string) *PutLifecycle { return r } -// PolicyId ID for the snapshot lifecycle policy you want to create or update. +// PolicyId The identifier for the snapshot lifecycle policy you want to create or +// update. // API Name: policyid func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { r.paramSet |= policyidMask @@ -315,8 +320,10 @@ func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { r.values.Set("master_timeout", duration) @@ -324,8 +331,10 @@ func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// To indicate that the request should never timeout, set it to `-1`. // API name: timeout func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { r.values.Set("timeout", duration) @@ -377,49 +386,71 @@ func (r *PutLifecycle) Pretty(pretty bool) *PutLifecycle { return r } -// Config Configuration for each snapshot created by the policy. +// Configuration for each snapshot created by the policy. // API name: config -func (r *PutLifecycle) Config(config *types.Configuration) *PutLifecycle { +func (r *PutLifecycle) Config(config types.ConfigurationVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Config = config + r.req.Config = config.ConfigurationCaster() return r } -// Name Name automatically assigned to each snapshot created by the policy. Date math +// Name automatically assigned to each snapshot created by the policy. Date math // is supported. To prevent conflicting snapshot names, a UUID is automatically // appended to each snapshot name. // API name: name func (r *PutLifecycle) Name(name string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// Repository Repository used to store snapshots created by this policy. This repository +// Repository used to store snapshots created by this policy. This repository // must exist prior to the policy’s creation. You can create a repository using // the snapshot repository API. // API name: repository func (r *PutLifecycle) Repository(repository string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Repository = &repository return r } -// Retention Retention rules used to retain and delete snapshots created by the policy. +// Retention rules used to retain and delete snapshots created by the policy. 
// API name: retention -func (r *PutLifecycle) Retention(retention *types.Retention) *PutLifecycle { +func (r *PutLifecycle) Retention(retention types.RetentionVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Retention = retention + r.req.Retention = retention.RetentionCaster() return r } -// Schedule Periodic or absolute schedule at which the policy creates snapshots. SLM +// Periodic or absolute schedule at which the policy creates snapshots. SLM // applies schedule changes immediately. // API name: schedule func (r *PutLifecycle) Schedule(cronexpression string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Schedule = &cronexpression return r diff --git a/typedapi/slm/putlifecycle/request.go b/typedapi/slm/putlifecycle/request.go index b50d79292f..8d3e42cb75 100644 --- a/typedapi/slm/putlifecycle/request.go +++ b/typedapi/slm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putlifecycle @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L89 type Request struct { // Config Configuration for each snapshot created by the policy. 
diff --git a/typedapi/slm/putlifecycle/response.go b/typedapi/slm/putlifecycle/response.go index b74a294ffb..6075653245 100644 --- a/typedapi/slm/putlifecycle/response.go +++ b/typedapi/slm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/start/response.go b/typedapi/slm/start/response.go index b85a469265..5696d75ec5 100644 --- a/typedapi/slm/start/response.go +++ b/typedapi/slm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/start/start.go b/typedapi/slm/start/start.go index e422f6f527..a35108880b 100644 --- a/typedapi/slm/start/start.go +++ b/typedapi/slm/start/start.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Turns on snapshot lifecycle management (SLM). +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. +// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. package start import ( @@ -68,9 +72,13 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Turns on snapshot lifecycle management (SLM). +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. +// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start func New(tp elastictransport.Interface) *Start { r := &Start{ transport: tp, @@ -276,6 +284,28 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *Start) Timeout(duration string) *Start { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/stop/response.go b/typedapi/slm/stop/response.go index 45fa1776df..103e52c559 100644 --- a/typedapi/slm/stop/response.go +++ b/typedapi/slm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/stop/stop.go b/typedapi/slm/stop/stop.go index 156ff01b8f..8385df267c 100644 --- a/typedapi/slm/stop/stop.go +++ b/typedapi/slm/stop/stop.go @@ -16,9 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Turns off snapshot lifecycle management (SLM). +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. 
+// Use the get snapshot lifecycle management status API to see if SLM is +// running. package stop import ( @@ -68,9 +80,21 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Turns off snapshot lifecycle management (SLM). +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. +// Use the get snapshot lifecycle management status API to see if SLM is +// running. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop func New(tp elastictransport.Interface) *Stop { r := &Stop{ transport: tp, @@ -276,6 +300,28 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
+// API name: timeout +func (r *Stop) Timeout(duration string) *Stop { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/snapshot/cleanuprepository/cleanup_repository.go b/typedapi/snapshot/cleanuprepository/cleanup_repository.go index 91860ae16d..fd022fc03c 100644 --- a/typedapi/snapshot/cleanuprepository/cleanup_repository.go +++ b/typedapi/snapshot/cleanuprepository/cleanup_repository.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Triggers the review of a snapshot repository’s contents and deletes any stale -// data not referenced by existing snapshots. +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. package cleanuprepository import ( @@ -77,10 +78,11 @@ func NewCleanupRepositoryFunc(tp elastictransport.Interface) NewCleanupRepositor } } -// Triggers the review of a snapshot repository’s contents and deletes any stale -// data not referenced by existing snapshots. +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository func New(tp elastictransport.Interface) *CleanupRepository { r := &CleanupRepository{ transport: tp, @@ -292,7 +294,7 @@ func (r *CleanupRepository) Header(key, value string) *CleanupRepository { return r } -// Repository Snapshot repository to clean up. +// Repository The name of the snapshot repository to clean up. // API Name: repository func (r *CleanupRepository) _repository(repository string) *CleanupRepository { r.paramSet |= repositoryMask @@ -301,7 +303,10 @@ func (r *CleanupRepository) _repository(repository string) *CleanupRepository { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1` // API name: master_timeout func (r *CleanupRepository) MasterTimeout(duration string) *CleanupRepository { r.values.Set("master_timeout", duration) @@ -309,7 +314,12 @@ func (r *CleanupRepository) MasterTimeout(duration string) *CleanupRepository { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. 
// API name: timeout func (r *CleanupRepository) Timeout(duration string) *CleanupRepository { r.values.Set("timeout", duration) diff --git a/typedapi/snapshot/cleanuprepository/response.go b/typedapi/snapshot/cleanuprepository/response.go index 772eb766c1..fbcf8fad07 100644 --- a/typedapi/snapshot/cleanuprepository/response.go +++ b/typedapi/snapshot/cleanuprepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package cleanuprepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cleanuprepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 type Response struct { // Results Statistics for cleanup operations. diff --git a/typedapi/snapshot/clone/clone.go b/typedapi/snapshot/clone/clone.go index 9b23037d35..008c65fbc7 100644 --- a/typedapi/snapshot/clone/clone.go +++ b/typedapi/snapshot/clone/clone.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clones indices from one snapshot into another snapshot in the same -// repository. +// Clone a snapshot. +// Clone part of all of a snapshot into another snapshot in the same repository. 
package clone import ( @@ -92,10 +92,10 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { } } -// Clones indices from one snapshot into another snapshot in the same -// repository. +// Clone a snapshot. +// Clone part of all of a snapshot into another snapshot in the same repository. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone func New(tp elastictransport.Interface) *Clone { r := &Clone{ transport: tp, @@ -103,8 +103,6 @@ func New(tp elastictransport.Interface) *Clone { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -330,7 +328,8 @@ func (r *Clone) Header(key, value string) *Clone { return r } -// Repository A repository name +// Repository The name of the snapshot repository that both source and target snapshot +// belong to. // API Name: repository func (r *Clone) _repository(repository string) *Clone { r.paramSet |= repositoryMask @@ -339,7 +338,7 @@ func (r *Clone) _repository(repository string) *Clone { return r } -// Snapshot The name of the snapshot to clone from +// Snapshot The source snapshot name. // API Name: snapshot func (r *Clone) _snapshot(snapshot string) *Clone { r.paramSet |= snapshotMask @@ -348,7 +347,7 @@ func (r *Clone) _snapshot(snapshot string) *Clone { return r } -// TargetSnapshot The name of the cloned snapshot to create +// TargetSnapshot The target snapshot name. // API Name: targetsnapshot func (r *Clone) _targetsnapshot(targetsnapshot string) *Clone { r.paramSet |= targetsnapshotMask @@ -357,7 +356,10 @@ func (r *Clone) _targetsnapshot(targetsnapshot string) *Clone { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Clone) MasterTimeout(duration string) *Clone { r.values.Set("master_timeout", duration) @@ -365,6 +367,9 @@ func (r *Clone) MasterTimeout(duration string) *Clone { return r } +// Timeout The period of time to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout func (r *Clone) Timeout(duration string) *Clone { r.values.Set("timeout", duration) @@ -416,8 +421,14 @@ func (r *Clone) Pretty(pretty bool) *Clone { return r } +// A comma-separated list of indices to include in the snapshot. +// Multi-target syntax is supported. // API name: indices func (r *Clone) Indices(indices string) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Indices = indices diff --git a/typedapi/snapshot/clone/request.go b/typedapi/snapshot/clone/request.go index 99650f6446..907b90f059 100644 --- a/typedapi/snapshot/clone/request.go +++ b/typedapi/snapshot/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clone @@ -27,8 +27,11 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L76 type Request struct { + + // Indices A comma-separated list of indices to include in the snapshot. + // Multi-target syntax is supported. Indices string `json:"indices"` } diff --git a/typedapi/snapshot/clone/response.go b/typedapi/snapshot/clone/response.go index 03e147a3fc..0673d39c52 100644 --- a/typedapi/snapshot/clone/response.go +++ b/typedapi/snapshot/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/snapshot/create/create.go b/typedapi/snapshot/create/create.go index fe107b7195..6378f9b47f 100644 --- a/typedapi/snapshot/create/create.go +++ b/typedapi/snapshot/create/create.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates a snapshot in a repository. +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. package create import ( @@ -35,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -86,9 +88,10 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } } -// Creates a snapshot in a repository. +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create func New(tp elastictransport.Interface) *Create { r := &Create{ transport: tp, @@ -96,8 +99,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,7 +316,7 @@ func (r *Create) Header(key, value string) *Create { return r } -// Repository Repository for the snapshot. +// Repository The name of the repository for the snapshot. 
// API Name: repository func (r *Create) _repository(repository string) *Create { r.paramSet |= repositoryMask @@ -324,7 +325,9 @@ func (r *Create) _repository(repository string) *Create { return r } -// Snapshot Name of the snapshot. Must be unique in the repository. +// Snapshot The name of the snapshot. +// It supportes date math. +// It must be unique in the repository. // API Name: snapshot func (r *Create) _snapshot(snapshot string) *Create { r.paramSet |= snapshotMask @@ -333,8 +336,9 @@ func (r *Create) _snapshot(snapshot string) *Create { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout func (r *Create) MasterTimeout(duration string) *Create { r.values.Set("master_timeout", duration) @@ -342,8 +346,8 @@ func (r *Create) MasterTimeout(duration string) *Create { return r } -// WaitForCompletion If `true`, the request returns a response when the snapshot is complete. If -// `false`, the request returns a response when the snapshot initializes. +// WaitForCompletion If `true`, the request returns a response when the snapshot is complete. +// If `false`, the request returns a response when the snapshot initializes. // API name: wait_for_completion func (r *Create) WaitForCompletion(waitforcompletion bool) *Create { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -395,65 +399,133 @@ func (r *Create) Pretty(pretty bool) *Create { return r } -// FeatureStates Feature states to include in the snapshot. Each feature state includes one or -// more system indices containing related data. You can view a list of eligible -// features using the get features API. 
If `include_global_state` is `true`, all -// current feature states are included by default. If `include_global_state` is -// `false`, no feature states are included by default. +// Determines how wildcard patterns in the `indices` parameter match data +// streams and indices. +// It supports comma-separated values such as `open,hidden`. +// API name: expand_wildcards +func (r *Create) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExpandWildcards = expandwildcards + + return r +} + +// The feature states to include in the snapshot. +// Each feature state includes one or more system indices containing related +// data. +// You can view a list of eligible features using the get features API. +// +// If `include_global_state` is `true`, all current feature states are included +// by default. +// If `include_global_state` is `false`, no feature states are included by +// default. +// +// Note that specifying an empty array will result in the default behavior. +// To exclude all feature states, regardless of the `include_global_state` +// value, specify an array with only the value `none` (`["none"]`). // API name: feature_states func (r *Create) FeatureStates(featurestates ...string) *Create { - r.req.FeatureStates = featurestates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + + r.req.FeatureStates = append(r.req.FeatureStates, v) + } return r } -// IgnoreUnavailable If `true`, the request ignores data streams and indices in `indices` that are -// missing or closed. If `false`, the request returns an error for any data -// stream or index that is missing or closed. +// If `true`, the request ignores data streams and indices in `indices` that are +// missing or closed. 
+// If `false`, the request returns an error for any data stream or index that is +// missing or closed. // API name: ignore_unavailable func (r *Create) IgnoreUnavailable(ignoreunavailable bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreUnavailable = &ignoreunavailable return r } -// IncludeGlobalState If `true`, the current cluster state is included in the snapshot. The cluster -// state includes persistent cluster settings, composable index templates, -// legacy index templates, ingest pipelines, and ILM policies. It also includes -// data stored in system indices, such as Watches and task records (configurable -// via `feature_states`). +// If `true`, the current cluster state is included in the snapshot. +// The cluster state includes persistent cluster settings, composable index +// templates, legacy index templates, ingest pipelines, and ILM policies. +// It also includes data stored in system indices, such as Watches and task +// records (configurable via `feature_states`). // API name: include_global_state func (r *Create) IncludeGlobalState(includeglobalstate bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeGlobalState = &includeglobalstate return r } -// Indices Data streams and indices to include in the snapshot. Supports multi-target -// syntax. Includes all data streams and indices by default. +// A comma-separated list of data streams and indices to include in the +// snapshot. +// It supports a multi-target syntax. +// The default is an empty array (`[]`), which includes all regular data streams +// and regular indices. +// To exclude all data streams and indices, use `-*`. +// +// You can't use this parameter to include or exclude system indices or system +// data streams from a snapshot. +// Use `feature_states` instead. 
// API name: indices func (r *Create) Indices(indices ...string) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r } -// Metadata Optional metadata for the snapshot. May have any contents. Must be less than -// 1024 bytes. This map is not automatically generated by Elasticsearch. +// Arbitrary metadata to the snapshot, such as a record of who took the +// snapshot, why it was taken, or any other useful data. +// It can have any contents but it must be less than 1024 bytes. +// This information is not automatically generated by Elasticsearch. // API name: metadata -func (r *Create) Metadata(metadata types.Metadata) *Create { - r.req.Metadata = metadata +func (r *Create) Metadata(metadata types.MetadataVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// Partial If `true`, allows restoring a partial snapshot of indices with unavailable -// shards. Only shards that were successfully included in the snapshot will be -// restored. All missing shards will be recreated as empty. If `false`, the -// entire restore operation will fail if one or more indices included in the -// snapshot do not have all primary shards available. +// If `true`, it enables you to restore a partial snapshot of indices with +// unavailable shards. +// Only shards that were successfully included in the snapshot will be restored. +// All missing shards will be recreated as empty. +// +// If `false`, the entire restore operation will fail if one or more indices +// included in the snapshot do not have all primary shards available. 
// API name: partial func (r *Create) Partial(partial bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Partial = &partial return r diff --git a/typedapi/snapshot/create/request.go b/typedapi/snapshot/create/request.go index 1b3d8fb959..589b278195 100644 --- a/typedapi/snapshot/create/request.go +++ b/typedapi/snapshot/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create @@ -29,40 +29,66 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L126 type Request struct { - // FeatureStates Feature states to include in the snapshot. Each feature state includes one or - // more system indices containing related data. You can view a list of eligible - // features using the get features API. If `include_global_state` is `true`, all - // current feature states are included by default. If `include_global_state` is - // `false`, no feature states are included by default. + // ExpandWildcards Determines how wildcard patterns in the `indices` parameter match data + // streams and indices. + // It supports comma-separated values such as `open,hidden`. 
+ ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"` + // FeatureStates The feature states to include in the snapshot. + // Each feature state includes one or more system indices containing related + // data. + // You can view a list of eligible features using the get features API. + // + // If `include_global_state` is `true`, all current feature states are included + // by default. + // If `include_global_state` is `false`, no feature states are included by + // default. + // + // Note that specifying an empty array will result in the default behavior. + // To exclude all feature states, regardless of the `include_global_state` + // value, specify an array with only the value `none` (`["none"]`). FeatureStates []string `json:"feature_states,omitempty"` // IgnoreUnavailable If `true`, the request ignores data streams and indices in `indices` that are - // missing or closed. If `false`, the request returns an error for any data - // stream or index that is missing or closed. + // missing or closed. + // If `false`, the request returns an error for any data stream or index that is + // missing or closed. IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` - // IncludeGlobalState If `true`, the current cluster state is included in the snapshot. The cluster - // state includes persistent cluster settings, composable index templates, - // legacy index templates, ingest pipelines, and ILM policies. It also includes - // data stored in system indices, such as Watches and task records (configurable - // via `feature_states`). + // IncludeGlobalState If `true`, the current cluster state is included in the snapshot. + // The cluster state includes persistent cluster settings, composable index + // templates, legacy index templates, ingest pipelines, and ILM policies. + // It also includes data stored in system indices, such as Watches and task + // records (configurable via `feature_states`). 
IncludeGlobalState *bool `json:"include_global_state,omitempty"` - // Indices Data streams and indices to include in the snapshot. Supports multi-target - // syntax. Includes all data streams and indices by default. + // Indices A comma-separated list of data streams and indices to include in the + // snapshot. + // It supports a multi-target syntax. + // The default is an empty array (`[]`), which includes all regular data streams + // and regular indices. + // To exclude all data streams and indices, use `-*`. + // + // You can't use this parameter to include or exclude system indices or system + // data streams from a snapshot. + // Use `feature_states` instead. Indices []string `json:"indices,omitempty"` - // Metadata Optional metadata for the snapshot. May have any contents. Must be less than - // 1024 bytes. This map is not automatically generated by Elasticsearch. + // Metadata Arbitrary metadata to the snapshot, such as a record of who took the + // snapshot, why it was taken, or any other useful data. + // It can have any contents but it must be less than 1024 bytes. + // This information is not automatically generated by Elasticsearch. Metadata types.Metadata `json:"metadata,omitempty"` - // Partial If `true`, allows restoring a partial snapshot of indices with unavailable - // shards. Only shards that were successfully included in the snapshot will be - // restored. All missing shards will be recreated as empty. If `false`, the - // entire restore operation will fail if one or more indices included in the - // snapshot do not have all primary shards available. + // Partial If `true`, it enables you to restore a partial snapshot of indices with + // unavailable shards. + // Only shards that were successfully included in the snapshot will be restored. + // All missing shards will be recreated as empty. + // + // If `false`, the entire restore operation will fail if one or more indices + // included in the snapshot do not have all primary shards available. 
Partial *bool `json:"partial,omitempty"` } @@ -99,6 +125,22 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "expand_wildcards": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &expandwildcard.ExpandWildcard{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + + s.ExpandWildcards = append(s.ExpandWildcards, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + } + case "feature_states": if err := dec.Decode(&s.FeatureStates); err != nil { return fmt.Errorf("%s | %w", "FeatureStates", err) diff --git a/typedapi/snapshot/create/response.go b/typedapi/snapshot/create/response.go index 8ce8a963b4..37293ce954 100644 --- a/typedapi/snapshot/create/response.go +++ b/typedapi/snapshot/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package create @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 type Response struct { // Accepted Equals `true` if the snapshot was accepted. 
Present when the request had diff --git a/typedapi/snapshot/createrepository/create_repository.go b/typedapi/snapshot/createrepository/create_repository.go index 207fde5c26..162f40bf35 100644 --- a/typedapi/snapshot/createrepository/create_repository.go +++ b/typedapi/snapshot/createrepository/create_repository.go @@ -16,9 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Creates a repository. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Create or update a snapshot repository. +// IMPORTANT: If you are migrating searchable snapshots, the repository name +// must be identical in the source and destination clusters. +// To register a snapshot repository, the cluster's global metadata must be +// writeable. +// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` +// and `cluster.blocks.read_only_allow_delete` settings) that prevent write +// access. +// +// Several options for this API can be specified using a query parameter or a +// request body parameter. +// If both parameters are specified, only the query parameter is used. package createrepository import ( @@ -81,9 +92,20 @@ func NewCreateRepositoryFunc(tp elastictransport.Interface) NewCreateRepository } } -// Creates a repository. +// Create or update a snapshot repository. +// IMPORTANT: If you are migrating searchable snapshots, the repository name +// must be identical in the source and destination clusters. +// To register a snapshot repository, the cluster's global metadata must be +// writeable. +// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` +// and `cluster.blocks.read_only_allow_delete` settings) that prevent write +// access. 
+// +// Several options for this API can be specified using a query parameter or a +// request body parameter. +// If both parameters are specified, only the query parameter is used. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository func New(tp elastictransport.Interface) *CreateRepository { r := &CreateRepository{ transport: tp, @@ -302,7 +324,7 @@ func (r *CreateRepository) Header(key, value string) *CreateRepository { return r } -// Repository A repository name +// Repository The name of the snapshot repository to register or update. // API Name: repository func (r *CreateRepository) _repository(repository string) *CreateRepository { r.paramSet |= repositoryMask @@ -311,7 +333,10 @@ func (r *CreateRepository) _repository(repository string) *CreateRepository { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *CreateRepository) MasterTimeout(duration string) *CreateRepository { r.values.Set("master_timeout", duration) @@ -319,7 +344,12 @@ func (r *CreateRepository) MasterTimeout(duration string) *CreateRepository { return r } -// Timeout Explicit operation timeout +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. 
// API name: timeout func (r *CreateRepository) Timeout(duration string) *CreateRepository { r.values.Set("timeout", duration) @@ -327,7 +357,11 @@ func (r *CreateRepository) Timeout(duration string) *CreateRepository { return r } -// Verify Whether to verify the repository after creation +// Verify If `true`, the request verifies the repository is functional on all master +// and data nodes in the cluster. +// If `false`, this verification is skipped. +// You can also perform this verification with the verify snapshot repository +// API. // API name: verify func (r *CreateRepository) Verify(verify bool) *CreateRepository { r.values.Set("verify", strconv.FormatBool(verify)) diff --git a/typedapi/snapshot/createrepository/request.go b/typedapi/snapshot/createrepository/request.go index 9a044dcb7a..23cbe29bab 100644 --- a/typedapi/snapshot/createrepository/request.go +++ b/typedapi/snapshot/createrepository/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createrepository @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L79 type Request = types.Repository diff --git a/typedapi/snapshot/createrepository/response.go b/typedapi/snapshot/createrepository/response.go index 69aa7ff4eb..872ddfbd9c 100644 --- a/typedapi/snapshot/createrepository/response.go +++ b/typedapi/snapshot/createrepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package createrepository // Response holds the response body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/snapshot/delete/delete.go b/typedapi/snapshot/delete/delete.go index 2339443d29..a3f3a86dea 100644 --- a/typedapi/snapshot/delete/delete.go +++ b/typedapi/snapshot/delete/delete.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes one or more snapshots. +// Delete snapshots. package delete import ( @@ -81,9 +81,9 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes one or more snapshots. +// Delete snapshots. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, @@ -299,7 +299,7 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Repository A repository name +// Repository The name of the repository to delete a snapshot from. // API Name: repository func (r *Delete) _repository(repository string) *Delete { r.paramSet |= repositoryMask @@ -308,7 +308,8 @@ func (r *Delete) _repository(repository string) *Delete { return r } -// Snapshot A comma-separated list of snapshot names +// Snapshot A comma-separated list of snapshot names to delete. +// It also accepts wildcards (`*`). // API Name: snapshot func (r *Delete) _snapshot(snapshot string) *Delete { r.paramSet |= snapshotMask @@ -317,7 +318,10 @@ func (r *Delete) _snapshot(snapshot string) *Delete { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. 
+// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Delete) MasterTimeout(duration string) *Delete { r.values.Set("master_timeout", duration) diff --git a/typedapi/snapshot/delete/response.go b/typedapi/snapshot/delete/response.go index f859066aa9..19f4231a49 100644 --- a/typedapi/snapshot/delete/response.go +++ b/typedapi/snapshot/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/snapshot/deleterepository/delete_repository.go b/typedapi/snapshot/deleterepository/delete_repository.go index 21f61fac46..d33c73dbe9 100644 --- a/typedapi/snapshot/deleterepository/delete_repository.go +++ b/typedapi/snapshot/deleterepository/delete_repository.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a repository. +// Delete snapshot repositories. 
+// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. package deleterepository import ( @@ -76,9 +79,12 @@ func NewDeleteRepositoryFunc(tp elastictransport.Interface) NewDeleteRepository } } -// Deletes a repository. +// Delete snapshot repositories. +// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository func New(tp elastictransport.Interface) *DeleteRepository { r := &DeleteRepository{ transport: tp, @@ -288,8 +294,8 @@ func (r *DeleteRepository) Header(key, value string) *DeleteRepository { return r } -// Repository Name of the snapshot repository to unregister. Wildcard (`*`) patterns are -// supported. +// Repository The name of the snapshot repositories to unregister. +// Wildcard (`*`) patterns are supported. // API Name: repository func (r *DeleteRepository) _repository(repository string) *DeleteRepository { r.paramSet |= repositoryMask @@ -298,7 +304,10 @@ func (r *DeleteRepository) _repository(repository string) *DeleteRepository { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. 
// API name: master_timeout func (r *DeleteRepository) MasterTimeout(duration string) *DeleteRepository { r.values.Set("master_timeout", duration) @@ -306,7 +315,12 @@ func (r *DeleteRepository) MasterTimeout(duration string) *DeleteRepository { return r } -// Timeout Explicit operation timeout +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. // API name: timeout func (r *DeleteRepository) Timeout(duration string) *DeleteRepository { r.values.Set("timeout", duration) diff --git a/typedapi/snapshot/deleterepository/response.go b/typedapi/snapshot/deleterepository/response.go index c53e26dca7..3956952c2e 100644 --- a/typedapi/snapshot/deleterepository/response.go +++ b/typedapi/snapshot/deleterepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleterepository // Response holds the response body struct for the package deleterepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/snapshot/get/get.go b/typedapi/snapshot/get/get.go index 042270f159..2247a2ecc9 100644 --- a/typedapi/snapshot/get/get.go +++ b/typedapi/snapshot/get/get.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about a snapshot. +// Get snapshot information. +// +// NOTE: The `after` parameter and `next` field enable you to iterate through +// snapshots with some consistency guarantees regarding concurrent creation or +// deletion of snapshots. +// It is guaranteed that any snapshot that exists at the beginning of the +// iteration and is not concurrently deleted will be seen during the iteration. +// Snapshots concurrently created may be seen during an iteration. package get import ( @@ -83,9 +90,16 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Returns information about a snapshot. +// Get snapshot information. +// +// NOTE: The `after` parameter and `next` field enable you to iterate through +// snapshots with some consistency guarantees regarding concurrent creation or +// deletion of snapshots. +// It is guaranteed that any snapshot that exists at the beginning of the +// iteration and is not concurrently deleted will be seen during the iteration. +// Snapshots concurrently created may be seen during an iteration. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -301,8 +315,9 @@ func (r *Get) Header(key, value string) *Get { return r } -// Repository Comma-separated list of snapshot repository names used to limit the request. -// Wildcard (*) expressions are supported. +// Repository A comma-separated list of snapshot repository names used to limit the +// request. +// Wildcard (`*`) expressions are supported. // API Name: repository func (r *Get) _repository(repository string) *Get { r.paramSet |= repositoryMask @@ -311,12 +326,13 @@ func (r *Get) _repository(repository string) *Get { return r } -// Snapshot Comma-separated list of snapshot names to retrieve. Also accepts wildcards -// (*). -// - To get information about all snapshots in a registered repository, use a -// wildcard (*) or _all. -// - To get information about any snapshots that are currently running, use -// _current. +// Snapshot A comma-separated list of snapshot names to retrieve. +// Wildcards (`*`) are supported. +// +// * To get information about all snapshots in a registered repository, use a +// wildcard (`*`) or `_all`. +// * To get information about any snapshots that are currently running, use +// `_current`. // API Name: snapshot func (r *Get) _snapshot(snapshot string) *Get { r.paramSet |= snapshotMask @@ -325,38 +341,41 @@ func (r *Get) _snapshot(snapshot string) *Get { return r } -// IgnoreUnavailable If false, the request returns an error for any snapshots that are -// unavailable. -// API name: ignore_unavailable -func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { - r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) +// After An offset identifier to start pagination from as returned by the next field +// in the response body. 
+// API name: after +func (r *Get) After(after string) *Get { + r.values.Set("after", after) return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *Get) MasterTimeout(duration string) *Get { - r.values.Set("master_timeout", duration) +// FromSortValue The value of the current sort column at which to start retrieval. +// It can be a string `snapshot-` or a repository name when sorting by snapshot +// or repository name. +// It can be a millisecond time value or a number when sorting by `index-` or +// shard count. +// API name: from_sort_value +func (r *Get) FromSortValue(fromsortvalue string) *Get { + r.values.Set("from_sort_value", fromsortvalue) return r } -// Verbose If true, returns additional information about each snapshot such as the -// version of Elasticsearch which took the snapshot, the start and end times of -// the snapshot, and the number of shards snapshotted. -// API name: verbose -func (r *Get) Verbose(verbose bool) *Get { - r.values.Set("verbose", strconv.FormatBool(verbose)) +// IgnoreUnavailable If `false`, the request returns an error for any snapshots that are +// unavailable. +// API name: ignore_unavailable +func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// IndexDetails If true, returns additional information about each index in the snapshot -// comprising the number of shards in the index, the total size of the index in -// bytes, and the maximum number of segments per shard in the index. Defaults to -// false, meaning that this information is omitted. 
+// IndexDetails If `true`, the response includes additional information about each index in +// the snapshot comprising the number of shards in the index, the total size of +// the index in bytes, and the maximum number of segments per shard in the +// index. +// The default is `false`, meaning that this information is omitted. // API name: index_details func (r *Get) IndexDetails(indexdetails bool) *Get { r.values.Set("index_details", strconv.FormatBool(indexdetails)) @@ -364,7 +383,7 @@ func (r *Get) IndexDetails(indexdetails bool) *Get { return r } -// IndexNames If true, returns the name of each index in each snapshot. +// IndexNames If `true`, the response includes the name of each index in each snapshot. // API name: index_names func (r *Get) IndexNames(indexnames bool) *Get { r.values.Set("index_names", strconv.FormatBool(indexnames)) @@ -372,7 +391,7 @@ func (r *Get) IndexNames(indexnames bool) *Get { return r } -// IncludeRepository If true, returns the repository name in each snapshot. +// IncludeRepository If `true`, the response includes the repository name in each snapshot. // API name: include_repository func (r *Get) IncludeRepository(includerepository bool) *Get { r.values.Set("include_repository", strconv.FormatBool(includerepository)) @@ -380,26 +399,19 @@ func (r *Get) IncludeRepository(includerepository bool) *Get { return r } -// Sort Allows setting a sort order for the result. Defaults to start_time, i.e. -// sorting by snapshot start time stamp. -// API name: sort -func (r *Get) Sort(sort snapshotsort.SnapshotSort) *Get { - r.values.Set("sort", sort.String()) - - return r -} - -// Size Maximum number of snapshots to return. Defaults to 0 which means return all -// that match the request without limit. -// API name: size -func (r *Get) Size(size int) *Get { - r.values.Set("size", strconv.Itoa(size)) +// MasterTimeout The period to wait for a connection to the master node. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Get) MasterTimeout(duration string) *Get { + r.values.Set("master_timeout", duration) return r } -// Order Sort order. Valid values are asc for ascending and desc for descending order. -// Defaults to asc, meaning ascending order. +// Order The sort order. +// Valid values are `asc` for ascending and `desc` for descending order. +// The default behavior is ascending order. // API name: order func (r *Get) Order(order sortorder.SortOrder) *Get { r.values.Set("order", order.String()) @@ -407,15 +419,6 @@ func (r *Get) Order(order sortorder.SortOrder) *Get { return r } -// After Offset identifier to start pagination from as returned by the next field in -// the response body. -// API name: after -func (r *Get) After(after string) *Get { - r.values.Set("after", after) - - return r -} - // Offset Numeric offset to start pagination from based on the snapshots matching this // request. Using a non-zero value for this parameter is mutually exclusive with // using the after parameter. Defaults to 0. @@ -426,22 +429,28 @@ func (r *Get) Offset(offset int) *Get { return r } -// FromSortValue Value of the current sort column at which to start retrieval. Can either be a -// string snapshot- or repository name when sorting by snapshot or repository -// name, a millisecond time value or a number when sorting by index- or shard -// count. -// API name: from_sort_value -func (r *Get) FromSortValue(fromsortvalue string) *Get { - r.values.Set("from_sort_value", fromsortvalue) +// Size The maximum number of snapshots to return. +// The default is 0, which means to return all that match the request without +// limit. +// API name: size +func (r *Get) Size(size int) *Get { + r.values.Set("size", strconv.Itoa(size)) return r } -// SlmPolicyFilter Filter snapshots by a comma-separated list of SLM policy names that snapshots -// belong to. 
Also accepts wildcards (*) and combinations of wildcards followed -// by exclude patterns starting with -. To include snapshots not created by an -// SLM policy you can use the special pattern _none that will match all -// snapshots without an SLM policy. +// SlmPolicyFilter Filter snapshots by a comma-separated list of snapshot lifecycle management +// (SLM) policy names that snapshots belong to. +// +// You can use wildcards (`*`) and combinations of wildcards followed by exclude +// patterns starting with `-`. +// For example, the pattern `*,-policy-a-\*` will return all snapshots except +// for those that were created by an SLM policy with a name starting with +// `policy-a-`. +// Note that the wildcard pattern `*` matches all snapshots created by an SLM +// policy but not those snapshots that were not created by an SLM policy. +// To include snapshots that were not created by an SLM policy, you can use the +// special pattern `_none` that will match all snapshots without an SLM policy. // API name: slm_policy_filter func (r *Get) SlmPolicyFilter(name string) *Get { r.values.Set("slm_policy_filter", name) @@ -449,6 +458,30 @@ func (r *Get) SlmPolicyFilter(name string) *Get { return r } +// Sort The sort order for the result. +// The default behavior is sorting by snapshot start time stamp. +// API name: sort +func (r *Get) Sort(sort snapshotsort.SnapshotSort) *Get { + r.values.Set("sort", sort.String()) + + return r +} + +// Verbose If `true`, returns additional information about each snapshot such as the +// version of Elasticsearch which took the snapshot, the start and end times of +// the snapshot, and the number of shards snapshotted. +// +// NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, +// `slm_policy_filter`, and `sort` are not supported when you set +// `verbose=false` and the sort order for requests with `verbose=false` is +// undefined. 
+// API name: verbose +func (r *Get) Verbose(verbose bool) *Get { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/snapshot/get/response.go b/typedapi/snapshot/get/response.go index ad0ad21805..15e0d34408 100644 --- a/typedapi/snapshot/get/response.go +++ b/typedapi/snapshot/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -26,16 +26,20 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/get/SnapshotGetResponse.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/get/SnapshotGetResponse.ts#L25-L47 type Response struct { + // Next If the request contained a size limit and there might be more results, a + // `next` field will be added to the response. + // It can be used as the `after` query parameter to fetch additional results. + Next *string `json:"next,omitempty"` // Remaining The number of remaining snapshots that were not returned due to size limits - // and that can be fetched by additional requests using the next field value. + // and that can be fetched by additional requests using the `next` field value. 
Remaining int `json:"remaining"` Responses []types.SnapshotResponseItem `json:"responses,omitempty"` Snapshots []types.SnapshotInfo `json:"snapshots,omitempty"` - // Total The total number of snapshots that match the request when ignoring size limit - // or after query parameter. + // Total The total number of snapshots that match the request when ignoring the size + // limit or `after` query parameter. Total int `json:"total"` } diff --git a/typedapi/snapshot/getrepository/get_repository.go b/typedapi/snapshot/getrepository/get_repository.go index f62b7883b4..20a5f5f4dd 100644 --- a/typedapi/snapshot/getrepository/get_repository.go +++ b/typedapi/snapshot/getrepository/get_repository.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about a repository. +// Get snapshot repository information. package getrepository import ( @@ -74,9 +74,9 @@ func NewGetRepositoryFunc(tp elastictransport.Interface) NewGetRepository { } } -// Returns information about a repository. +// Get snapshot repository information. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository func New(tp elastictransport.Interface) *GetRepository { r := &GetRepository{ transport: tp, @@ -291,7 +291,13 @@ func (r *GetRepository) Header(key, value string) *GetRepository { return r } -// Repository A comma-separated list of repository names +// Repository A comma-separated list of snapshot repository names used to limit the +// request. +// Wildcard (`*`) expressions are supported including combining wildcards with +// exclude patterns starting with `-`. 
+// +// To get information about all snapshot repositories registered in the cluster, +// omit this parameter or use `*` or `_all`. // API Name: repository func (r *GetRepository) Repository(repository string) *GetRepository { r.paramSet |= repositoryMask @@ -300,8 +306,8 @@ func (r *GetRepository) Repository(repository string) *GetRepository { return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request gets information from the local node only. +// If `false`, the request gets information from the master node. // API name: local func (r *GetRepository) Local(local bool) *GetRepository { r.values.Set("local", strconv.FormatBool(local)) @@ -309,7 +315,10 @@ func (r *GetRepository) Local(local bool) *GetRepository { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *GetRepository) MasterTimeout(duration string) *GetRepository { r.values.Set("master_timeout", duration) diff --git a/typedapi/snapshot/getrepository/response.go b/typedapi/snapshot/getrepository/response.go index 971b63ec82..2837d790dd 100644 --- a/typedapi/snapshot/getrepository/response.go +++ b/typedapi/snapshot/getrepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getrepository @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package getrepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 type Response map[string]types.Repository diff --git a/typedapi/snapshot/repositoryanalyze/repository_analyze.go b/typedapi/snapshot/repositoryanalyze/repository_analyze.go new file mode 100644 index 0000000000..4f02a6d540 --- /dev/null +++ b/typedapi/snapshot/repositoryanalyze/repository_analyze.go @@ -0,0 +1,804 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Analyze a snapshot repository. +// Analyze the performance characteristics and any incorrect behaviour found in +// a repository. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. +// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. This API performs a collection of read and write operations on +// your repository which are designed to detect incorrect behaviour and to +// measure the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. +// Run your first analysis with the default parameter values to check for simple +// problems. +// If successful, run a sequence of increasingly large analyses until you +// encounter a failure or you reach a `blob_count` of at least `2000`, a +// `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, +// and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. +// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. 
+// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. +// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. +// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. 
+// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. +// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. +// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. 
+// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. The request parameters and response +// format depend on details of the implementation so may also be different in +// newer versions. +// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. +// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data. +// If any read returns partial data then the repository does not implement the +// necessary atomicity semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will overwrite the blob while +// its peers are reading it. 
+// In this case the data read may come from either the original or the
+// overwritten blob, but the read operation must not return partial data or a
+// mix of data from the two blobs.
+// If any of these reads returns partial data or a mix of the two blobs then the
+// repository does not implement the necessary atomicity semantics that
+// Elasticsearch requires for overwrites.
+//
+// The executing node will use a variety of different methods to write the blob.
+// For instance, where applicable, it will use both single-part and multi-part
+// uploads.
+// Similarly, the reading nodes will use a variety of different methods to read
+// the data back again.
+// For instance they may read the entire blob from start to end or may read only
+// a subset of the data.
+//
+// For some blob-level tasks, the executing node will cancel the write before it
+// is complete.
+// In this case, it still instructs some of the other nodes in the cluster to
+// attempt to read the blob but all of these reads must fail to find the blob.
+//
+// Linearizable registers are special blobs that Elasticsearch manipulates using
+// an atomic compare-and-exchange operation.
+// This operation ensures correct and strongly-consistent behavior even when the
+// blob is accessed by multiple nodes at the same time.
+// The detailed implementation of the compare-and-exchange operation on
+// linearizable registers varies by repository type.
+// Repository analysis verifies that uncontended compare-and-exchange
+// operations on a linearizable register blob always succeed.
+// Repository analysis also verifies that contended operations either succeed or
+// report the contention but do not return incorrect results.
+// If an operation fails due to contention, Elasticsearch retries the operation
+// until it succeeds.
+// Most of the compare-and-exchange operations performed by repository analysis
+// atomically increment a counter which is represented as an 8-byte blob.
+// Some operations also verify the behavior on small blobs with sizes other than +// 8 bytes. +package repositoryanalyze + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RepositoryAnalyze struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositoryAnalyze type alias for index. +type NewRepositoryAnalyze func(repository string) *RepositoryAnalyze + +// NewRepositoryAnalyzeFunc returns a new instance of RepositoryAnalyze with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRepositoryAnalyzeFunc(tp elastictransport.Interface) NewRepositoryAnalyze { + return func(repository string) *RepositoryAnalyze { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Analyze a snapshot repository. +// Analyze the performance characteristics and any incorrect behaviour found in +// a repository. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. 
+// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. This API performs a collection of read and write operations on +// your repository which are designed to detect incorrect behaviour and to +// measure the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. +// Run your first analysis with the default parameter values to check for simple +// problems. +// If successful, run a sequence of increasingly large analyses until you +// encounter a failure or you reach a `blob_count` of at least `2000`, a +// `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, +// and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. +// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. +// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. +// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. 
+// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. 
Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. +// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. +// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. +// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. The request parameters and response +// format depend on details of the implementation so may also be different in +// newer versions. 
+// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. +// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data. +// If any read returns partial data then the repository does not implement the +// necessary atomicity semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will overwrite the blob while +// its peers are reading it. +// In this case the data read may come from either the original or the +// overwritten blob, but the read operation must not return partial data or a +// mix of data from the two blobs. +// If any of these reads returns partial data or a mix of the two blobs then the +// repository does not implement the necessary atomicity semantics that +// Elasticsearch requires for overwrites. +// +// The executing node will use a variety of different methods to write the blob. +// For instance, where applicable, it will use both single-part and multi-part +// uploads. +// Similarly, the reading nodes will use a variety of different methods to read +// the data back again. 
+// For instance they may read the entire blob from start to end or may read only
+// a subset of the data.
+//
+// For some blob-level tasks, the executing node will cancel the write before it
+// is complete.
+// In this case, it still instructs some of the other nodes in the cluster to
+// attempt to read the blob but all of these reads must fail to find the blob.
+//
+// Linearizable registers are special blobs that Elasticsearch manipulates using
+// an atomic compare-and-exchange operation.
+// This operation ensures correct and strongly-consistent behavior even when the
+// blob is accessed by multiple nodes at the same time.
+// The detailed implementation of the compare-and-exchange operation on
+// linearizable registers varies by repository type.
+// Repository analysis verifies that uncontended compare-and-exchange
+// operations on a linearizable register blob always succeed.
+// Repository analysis also verifies that contended operations either succeed or
+// report the contention but do not return incorrect results.
+// If an operation fails due to contention, Elasticsearch retries the operation
+// until it succeeds.
+// Most of the compare-and-exchange operations performed by repository analysis
+// atomically increment a counter which is represented as an 8-byte blob.
+// Some operations also verify the behavior on small blobs with sizes other than
+// 8 bytes.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze
+func New(tp elastictransport.Interface) *RepositoryAnalyze {
+	r := &RepositoryAnalyze{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *RepositoryAnalyze) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_analyze") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RepositoryAnalyze) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_analyze") + if reader := instrument.RecordRequestBody(ctx, "snapshot.repository_analyze", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_analyze") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RepositoryAnalyze query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a repositoryanalyze.Response +func (r RepositoryAnalyze) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, 
err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r RepositoryAnalyze) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RepositoryAnalyze query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RepositoryAnalyze headers map. 
+func (r *RepositoryAnalyze) Header(key, value string) *RepositoryAnalyze { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository. +// API Name: repository +func (r *RepositoryAnalyze) _repository(repository string) *RepositoryAnalyze { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// BlobCount The total number of blobs to write to the repository during the test. +// For realistic experiments, you should set it to at least `2000`. +// API name: blob_count +func (r *RepositoryAnalyze) BlobCount(blobcount int) *RepositoryAnalyze { + r.values.Set("blob_count", strconv.Itoa(blobcount)) + + return r +} + +// Concurrency The number of operations to run concurrently during the test. +// API name: concurrency +func (r *RepositoryAnalyze) Concurrency(concurrency int) *RepositoryAnalyze { + r.values.Set("concurrency", strconv.Itoa(concurrency)) + + return r +} + +// Detailed Indicates whether to return detailed results, including timing information +// for every operation performed during the analysis. +// If false, it returns only a summary of the analysis. +// API name: detailed +func (r *RepositoryAnalyze) Detailed(detailed bool) *RepositoryAnalyze { + r.values.Set("detailed", strconv.FormatBool(detailed)) + + return r +} + +// EarlyReadNodeCount The number of nodes on which to perform an early read operation while writing +// each blob. +// Early read operations are only rarely performed. +// API name: early_read_node_count +func (r *RepositoryAnalyze) EarlyReadNodeCount(earlyreadnodecount int) *RepositoryAnalyze { + r.values.Set("early_read_node_count", strconv.Itoa(earlyreadnodecount)) + + return r +} + +// MaxBlobSize The maximum size of a blob to be written during the test. +// For realistic experiments, you should set it to at least `2gb`. 
+// API name: max_blob_size +func (r *RepositoryAnalyze) MaxBlobSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_blob_size", bytesize) + + return r +} + +// MaxTotalDataSize An upper limit on the total size of all the blobs written during the test. +// For realistic experiments, you should set it to at least `1tb`. +// API name: max_total_data_size +func (r *RepositoryAnalyze) MaxTotalDataSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_total_data_size", bytesize) + + return r +} + +// RareActionProbability The probability of performing a rare action such as an early read, an +// overwrite, or an aborted write on each blob. +// API name: rare_action_probability +func (r *RepositoryAnalyze) RareActionProbability(rareactionprobability string) *RepositoryAnalyze { + r.values.Set("rare_action_probability", rareactionprobability) + + return r +} + +// RarelyAbortWrites Indicates whether to rarely cancel writes before they complete. +// API name: rarely_abort_writes +func (r *RepositoryAnalyze) RarelyAbortWrites(rarelyabortwrites bool) *RepositoryAnalyze { + r.values.Set("rarely_abort_writes", strconv.FormatBool(rarelyabortwrites)) + + return r +} + +// ReadNodeCount The number of nodes on which to read a blob after writing. +// API name: read_node_count +func (r *RepositoryAnalyze) ReadNodeCount(readnodecount int) *RepositoryAnalyze { + r.values.Set("read_node_count", strconv.Itoa(readnodecount)) + + return r +} + +// RegisterOperationCount The minimum number of linearizable register operations to perform in total. +// For realistic experiments, you should set it to at least `100`. +// API name: register_operation_count +func (r *RepositoryAnalyze) RegisterOperationCount(registeroperationcount int) *RepositoryAnalyze { + r.values.Set("register_operation_count", strconv.Itoa(registeroperationcount)) + + return r +} + +// Seed The seed for the pseudo-random number generator used to generate the list of +// operations performed during the test. 
+// To repeat the same set of operations in multiple experiments, use the same
+// seed in each experiment.
+// Note that the operations are performed concurrently so might not always
+// happen in the same order on each run.
+// API name: seed
+func (r *RepositoryAnalyze) Seed(seed int) *RepositoryAnalyze {
+	r.values.Set("seed", strconv.Itoa(seed))
+
+	return r
+}
+
+// Timeout The period of time to wait for the test to complete.
+// If no response is received before the timeout expires, the test is cancelled
+// and returns an error.
+// API name: timeout
+func (r *RepositoryAnalyze) Timeout(duration string) *RepositoryAnalyze {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *RepositoryAnalyze) ErrorTrace(errortrace bool) *RepositoryAnalyze {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *RepositoryAnalyze) FilterPath(filterpaths ...string) *RepositoryAnalyze {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *RepositoryAnalyze) Human(human bool) *RepositoryAnalyze {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty +func (r *RepositoryAnalyze) Pretty(pretty bool) *RepositoryAnalyze { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/snapshot/repositoryanalyze/response.go b/typedapi/snapshot/repositoryanalyze/response.go new file mode 100644 index 0000000000..b250233a6c --- /dev/null +++ b/typedapi/snapshot/repositoryanalyze/response.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package repositoryanalyze + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package repositoryanalyze +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L24-L108 +type Response struct { + + // BlobCount The number of blobs written to the repository during the test. 
+ BlobCount int `json:"blob_count"` + // BlobPath The path in the repository under which all the blobs were written during the + // test. + BlobPath string `json:"blob_path"` + // Concurrency The number of write operations performed concurrently during the test. + Concurrency int `json:"concurrency"` + // CoordinatingNode The node that coordinated the analysis and performed the final cleanup. + CoordinatingNode types.SnapshotNodeInfo `json:"coordinating_node"` + // DeleteElapsed The time it took to delete all the blobs in the container. + DeleteElapsed types.Duration `json:"delete_elapsed"` + // DeleteElapsedNanos The time it took to delete all the blobs in the container, in nanoseconds. + DeleteElapsedNanos int64 `json:"delete_elapsed_nanos"` + // Details A description of every read and write operation performed during the test. + Details types.DetailsInfo `json:"details"` + // EarlyReadNodeCount The limit on the number of nodes on which early read operations were + // performed after writing each blob. + EarlyReadNodeCount int `json:"early_read_node_count"` + // IssuesDetected A list of correctness issues detected, which is empty if the API succeeded. + // It is included to emphasize that a successful response does not guarantee + // correct behaviour in future. + IssuesDetected []string `json:"issues_detected"` + // ListingElapsed The time it took to retrieve a list of all the blobs in the container. + ListingElapsed types.Duration `json:"listing_elapsed"` + // ListingElapsedNanos The time it took to retrieve a list of all the blobs in the container, in + // nanoseconds. + ListingElapsedNanos int64 `json:"listing_elapsed_nanos"` + // MaxBlobSize The limit on the size of a blob written during the test. + MaxBlobSize types.ByteSize `json:"max_blob_size"` + // MaxBlobSizeBytes The limit, in bytes, on the size of a blob written during the test. 
+ MaxBlobSizeBytes int64 `json:"max_blob_size_bytes"` + // MaxTotalDataSize The limit on the total size of all blobs written during the test. + MaxTotalDataSize types.ByteSize `json:"max_total_data_size"` + // MaxTotalDataSizeBytes The limit, in bytes, on the total size of all blobs written during the test. + MaxTotalDataSizeBytes int64 `json:"max_total_data_size_bytes"` + // RareActionProbability The probability of performing rare actions during the test. + RareActionProbability types.Float64 `json:"rare_action_probability"` + // ReadNodeCount The limit on the number of nodes on which read operations were performed + // after writing each blob. + ReadNodeCount int `json:"read_node_count"` + // Repository The name of the repository that was the subject of the analysis. + Repository string `json:"repository"` + // Seed The seed for the pseudo-random number generator used to generate the + // operations used during the test. + Seed int64 `json:"seed"` + // Summary A collection of statistics that summarize the results of the test. + Summary types.SummaryInfo `json:"summary"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go b/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go index d34bfe25c9..f464b08831 100644 --- a/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go +++ b/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go @@ -16,9 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Verifies the integrity of the contents of a snapshot repository +// Verify the repository integrity.
+// Verify the integrity of the contents of a snapshot repository. +// +// This API enables you to perform a comprehensive check of the contents of a +// repository, looking for any anomalies in its data or metadata which might +// prevent you from restoring snapshots from the repository or which might cause +// future snapshot create or delete operations to fail. +// +// If you suspect the integrity of the contents of one of your snapshot +// repositories, cease all write activity to this repository immediately, set +// its `read_only` option to `true`, and use this API to verify its integrity. +// Until you do so: +// +// * It may not be possible to restore some snapshots from this repository. +// * Searchable snapshots may report errors when searched or may have unassigned +// shards. +// * Taking snapshots into this repository may fail or may appear to succeed but +// have created a snapshot which cannot be restored. +// * Deleting snapshots from this repository may fail or may appear to succeed +// but leave the underlying data on disk. +// * Continuing to write to the repository while it is in an invalid state may +// cause additional damage to its contents. +// +// If the API finds any problems with the integrity of the contents of your +// repository, Elasticsearch will not be able to repair the damage. +// The only way to bring the repository back into a fully working state after +// its contents have been damaged is by restoring its contents from a repository +// backup which was taken before the damage occurred. +// You must also identify what caused the damage and take action to prevent it +// from happening again. +// +// If you cannot restore a repository backup, register a new repository and use +// this for all future snapshot operations.
+// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository. +// +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// The default values for the parameters of this API are designed to limit the +// impact of the integrity verification on other activities in your cluster. +// For instance, by default it will only use at most half of the `snapshot_meta` +// threads to verify the integrity of each snapshot, allowing other snapshot +// operations to use the other half of this thread pool. +// If you modify these parameters to speed up the verification process, you risk +// disrupting other snapshot-related operations in your cluster. +// For large repositories, consider setting up a separate single-node +// Elasticsearch cluster just for running the integrity verification API. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. 
package repositoryverifyintegrity import ( @@ -76,9 +140,73 @@ func NewRepositoryVerifyIntegrityFunc(tp elastictransport.Interface) NewReposito } } -// Verifies the integrity of the contents of a snapshot repository +// Verify the repository integrity. +// Verify the integrity of the contents of a snapshot repository. +// +// This API enables you to perform a comprehensive check of the contents of a +// repository, looking for any anomalies in its data or metadata which might +// prevent you from restoring snapshots from the repository or which might cause +// future snapshot create or delete operations to fail. +// +// If you suspect the integrity of the contents of one of your snapshot +// repositories, cease all write activity to this repository immediately, set +// its `read_only` option to `true`, and use this API to verify its integrity. +// Until you do so: +// +// * It may not be possible to restore some snapshots from this repository. +// * Searchable snapshots may report errors when searched or may have unassigned +// shards. +// * Taking snapshots into this repository may fail or may appear to succeed but +// have created a snapshot which cannot be restored. +// * Deleting snapshots from this repository may fail or may appear to succeed +// but leave the underlying data on disk. +// * Continuing to write to the repository while it is in an invalid state may +// cause additional damage to its contents. +// +// If the API finds any problems with the integrity of the contents of your +// repository, Elasticsearch will not be able to repair the damage. +// The only way to bring the repository back into a fully working state after +// its contents have been damaged is by restoring its contents from a repository +// backup which was taken before the damage occurred. +// You must also identify what caused the damage and take action to prevent it +// from happening again.
+// +// If you cannot restore a repository backup, register a new repository and use +// this for all future snapshot operations. +// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// The default values for the parameters of this API are designed to limit the +// impact of the integrity verification on other activities in your cluster. +// For instance, by default it will only use at most half of the `snapshot_meta` +// threads to verify the integrity of each snapshot, allowing other snapshot +// operations to use the other half of this thread pool. +// If you modify these parameters to speed up the verification process, you risk +// disrupting other snapshot-related operations in your cluster. +// For large repositories, consider setting up a separate single-node +// Elasticsearch cluster just for running the integrity verification API. 
+// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity func New(tp elastictransport.Interface) *RepositoryVerifyIntegrity { r := &RepositoryVerifyIntegrity{ transport: tp, @@ -290,7 +418,7 @@ func (r *RepositoryVerifyIntegrity) Header(key, value string) *RepositoryVerifyI return r } -// Repository A repository name +// Repository The name of the snapshot repository. // API Name: repository func (r *RepositoryVerifyIntegrity) _repository(repository string) *RepositoryVerifyIntegrity { r.paramSet |= repositoryMask @@ -299,15 +427,8 @@ func (r *RepositoryVerifyIntegrity) _repository(repository string) *RepositoryVe return r } -// MetaThreadPoolConcurrency Number of threads to use for reading metadata -// API name: meta_thread_pool_concurrency -func (r *RepositoryVerifyIntegrity) MetaThreadPoolConcurrency(metathreadpoolconcurrency int) *RepositoryVerifyIntegrity { - r.values.Set("meta_thread_pool_concurrency", strconv.Itoa(metathreadpoolconcurrency)) - - return r -} - -// BlobThreadPoolConcurrency Number of threads to use for reading blob contents +// BlobThreadPoolConcurrency If `verify_blob_contents` is `true`, this parameter specifies how many blobs +// to verify at once. 
// API name: blob_thread_pool_concurrency func (r *RepositoryVerifyIntegrity) BlobThreadPoolConcurrency(blobthreadpoolconcurrency int) *RepositoryVerifyIntegrity { r.values.Set("blob_thread_pool_concurrency", strconv.Itoa(blobthreadpoolconcurrency)) @@ -315,15 +436,17 @@ func (r *RepositoryVerifyIntegrity) BlobThreadPoolConcurrency(blobthreadpoolconc return r } -// SnapshotVerificationConcurrency Number of snapshots to verify concurrently -// API name: snapshot_verification_concurrency -func (r *RepositoryVerifyIntegrity) SnapshotVerificationConcurrency(snapshotverificationconcurrency int) *RepositoryVerifyIntegrity { - r.values.Set("snapshot_verification_concurrency", strconv.Itoa(snapshotverificationconcurrency)) +// IndexSnapshotVerificationConcurrency The maximum number of index snapshots to verify concurrently within each +// index verification. +// API name: index_snapshot_verification_concurrency +func (r *RepositoryVerifyIntegrity) IndexSnapshotVerificationConcurrency(indexsnapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("index_snapshot_verification_concurrency", strconv.Itoa(indexsnapshotverificationconcurrency)) return r } -// IndexVerificationConcurrency Number of indices to verify concurrently +// IndexVerificationConcurrency The number of indices to verify concurrently. +// The default behavior is to use the entire `snapshot_meta` thread pool. 
// API name: index_verification_concurrency func (r *RepositoryVerifyIntegrity) IndexVerificationConcurrency(indexverificationconcurrency int) *RepositoryVerifyIntegrity { r.values.Set("index_verification_concurrency", strconv.Itoa(indexverificationconcurrency)) @@ -331,15 +454,19 @@ func (r *RepositoryVerifyIntegrity) IndexVerificationConcurrency(indexverificati return r } -// IndexSnapshotVerificationConcurrency Number of snapshots to verify concurrently within each index -// API name: index_snapshot_verification_concurrency -func (r *RepositoryVerifyIntegrity) IndexSnapshotVerificationConcurrency(indexsnapshotverificationconcurrency int) *RepositoryVerifyIntegrity { - r.values.Set("index_snapshot_verification_concurrency", strconv.Itoa(indexsnapshotverificationconcurrency)) +// MaxBytesPerSec If `verify_blob_contents` is `true`, this parameter specifies the maximum +// amount of data that Elasticsearch will read from the repository every second. +// API name: max_bytes_per_sec +func (r *RepositoryVerifyIntegrity) MaxBytesPerSec(maxbytespersec string) *RepositoryVerifyIntegrity { + r.values.Set("max_bytes_per_sec", maxbytespersec) return r } -// MaxFailedShardSnapshots Maximum permitted number of failed shard snapshots +// MaxFailedShardSnapshots The number of shard snapshot failures to track during integrity verification, +// in order to avoid excessive resource usage. +// If your repository contains more than this number of shard snapshot failures, +// the verification will fail. 
// API name: max_failed_shard_snapshots func (r *RepositoryVerifyIntegrity) MaxFailedShardSnapshots(maxfailedshardsnapshots int) *RepositoryVerifyIntegrity { r.values.Set("max_failed_shard_snapshots", strconv.Itoa(maxfailedshardsnapshots)) @@ -347,18 +474,33 @@ func (r *RepositoryVerifyIntegrity) MaxFailedShardSnapshots(maxfailedshardsnapsh return r } -// VerifyBlobContents Whether to verify the contents of individual blobs -// API name: verify_blob_contents -func (r *RepositoryVerifyIntegrity) VerifyBlobContents(verifyblobcontents bool) *RepositoryVerifyIntegrity { - r.values.Set("verify_blob_contents", strconv.FormatBool(verifyblobcontents)) +// MetaThreadPoolConcurrency The maximum number of snapshot metadata operations to run concurrently. +// The default behavior is to use at most half of the `snapshot_meta` thread +// pool at once. +// API name: meta_thread_pool_concurrency +func (r *RepositoryVerifyIntegrity) MetaThreadPoolConcurrency(metathreadpoolconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("meta_thread_pool_concurrency", strconv.Itoa(metathreadpoolconcurrency)) return r } -// MaxBytesPerSec Rate limit for individual blob verification -// API name: max_bytes_per_sec -func (r *RepositoryVerifyIntegrity) MaxBytesPerSec(maxbytespersec string) *RepositoryVerifyIntegrity { - r.values.Set("max_bytes_per_sec", maxbytespersec) +// SnapshotVerificationConcurrency The number of snapshots to verify concurrently. +// The default behavior is to use at most half of the `snapshot_meta` thread +// pool at once. +// API name: snapshot_verification_concurrency +func (r *RepositoryVerifyIntegrity) SnapshotVerificationConcurrency(snapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("snapshot_verification_concurrency", strconv.Itoa(snapshotverificationconcurrency)) + + return r +} + +// VerifyBlobContents Indicates whether to verify the checksum of every data blob in the +// repository. 
+// If this feature is enabled, Elasticsearch will read the entire repository +// contents, which may be extremely slow and expensive. +// API name: verify_blob_contents +func (r *RepositoryVerifyIntegrity) VerifyBlobContents(verifyblobcontents bool) *RepositoryVerifyIntegrity { + r.values.Set("verify_blob_contents", strconv.FormatBool(verifyblobcontents)) return r } diff --git a/typedapi/snapshot/repositoryverifyintegrity/response.go b/typedapi/snapshot/repositoryverifyintegrity/response.go index e31063b917..5aeb5e1e19 100644 --- a/typedapi/snapshot/repositoryverifyintegrity/response.go +++ b/typedapi/snapshot/repositoryverifyintegrity/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package repositoryverifyintegrity @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package repositoryverifyintegrity // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L24 type Response = json.RawMessage diff --git a/typedapi/snapshot/restore/request.go b/typedapi/snapshot/restore/request.go index f7d6eef56a..fb168cfc1b 100644 --- a/typedapi/snapshot/restore/request.go +++ b/typedapi/snapshot/restore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package restore @@ -33,18 +33,92 @@ import ( // Request holds the request body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L175 type Request struct { - FeatureStates []string `json:"feature_states,omitempty"` - IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` - IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` - IncludeAliases *bool `json:"include_aliases,omitempty"` - IncludeGlobalState *bool `json:"include_global_state,omitempty"` - IndexSettings *types.IndexSettings `json:"index_settings,omitempty"` - Indices []string `json:"indices,omitempty"` - Partial *bool `json:"partial,omitempty"` - RenamePattern *string `json:"rename_pattern,omitempty"` - RenameReplacement *string `json:"rename_replacement,omitempty"` + + // FeatureStates The feature states to restore. + // If `include_global_state` is `true`, the request restores all feature states + // in the snapshot by default. + // If `include_global_state` is `false`, the request restores no feature states + // by default. + // Note that specifying an empty array will result in the default behavior. + // To restore no feature states, regardless of the `include_global_state` value, + // specify an array containing only the value `none` (`["none"]`). + FeatureStates []string `json:"feature_states,omitempty"` + // IgnoreIndexSettings The index settings to not restore from the snapshot. + // You can't use this option to ignore `index.number_of_shards`. 
+ // + // For data streams, this option applies only to restored backing indices. + // New backing indices are configured using the data stream's matching index + // template. + IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` + // IgnoreUnavailable If `true`, the request ignores any index or data stream in indices that's + // missing from the snapshot. + // If `false`, the request returns an error for any missing index or data + // stream. + IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` + // IncludeAliases If `true`, the request restores aliases for any restored data streams and + // indices. + // If `false`, the request doesn’t restore aliases. + IncludeAliases *bool `json:"include_aliases,omitempty"` + // IncludeGlobalState If `true`, restore the cluster state. The cluster state includes: + // + // * Persistent cluster settings + // * Index templates + // * Legacy index templates + // * Ingest pipelines + // * Index lifecycle management (ILM) policies + // * Stored scripts + // * For snapshots taken after 7.12.0, feature states + // + // If `include_global_state` is `true`, the restore operation merges the legacy + // index templates in your cluster with the templates contained in the snapshot, + // replacing any existing ones whose name matches one in the snapshot. + // It completely removes all persistent settings, non-legacy index templates, + // ingest pipelines, and ILM lifecycle policies that exist in your cluster and + // replaces them with the corresponding items from the snapshot. + // + // Use the `feature_states` parameter to configure how feature states are + // restored. + // + // If `include_global_state` is `true` and a snapshot was created without a + // global state then the restore request will fail. + IncludeGlobalState *bool `json:"include_global_state,omitempty"` + // IndexSettings Index settings to add or change in restored indices, including backing + // indices. 
+ // You can't use this option to change `index.number_of_shards`. + // + // For data streams, this option applies only to restored backing indices. + // New backing indices are configured using the data stream's matching index + // template. + IndexSettings *types.IndexSettings `json:"index_settings,omitempty"` + // Indices A comma-separated list of indices and data streams to restore. + // It supports a multi-target syntax. + // The default behavior is all regular indices and regular data streams in the + // snapshot. + // + // You can't use this parameter to restore system indices or system data + // streams. + // Use `feature_states` instead. + Indices []string `json:"indices,omitempty"` + // Partial If `false`, the entire restore operation will fail if one or more indices + // included in the snapshot do not have all primary shards available. + // + // If true, it allows restoring a partial snapshot of indices with unavailable + // shards. + // Only shards that were successfully included in the snapshot will be restored. + // All missing shards will be recreated as empty. + Partial *bool `json:"partial,omitempty"` + // RenamePattern A rename pattern to apply to restored data streams and indices. + // Data streams and indices matching the rename pattern will be renamed + // according to `rename_replacement`. + // + // The rename pattern is applied as defined by the regular expression that + // supports referencing the original text, according to the `appendReplacement` + // logic. + RenamePattern *string `json:"rename_pattern,omitempty"` + // RenameReplacement The rename replacement string that is used with the `rename_pattern`. 
+ RenameReplacement *string `json:"rename_replacement,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/snapshot/restore/response.go b/typedapi/snapshot/restore/response.go index 31a73b34b6..5b8fa9296e 100644 --- a/typedapi/snapshot/restore/response.go +++ b/typedapi/snapshot/restore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package restore @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L28 type Response struct { Accepted *bool `json:"accepted,omitempty"` Snapshot *types.SnapshotRestore `json:"snapshot,omitempty"` diff --git a/typedapi/snapshot/restore/restore.go b/typedapi/snapshot/restore/restore.go index 591744c4ce..e51665624b 100644 --- a/typedapi/snapshot/restore/restore.go +++ b/typedapi/snapshot/restore/restore.go @@ -16,9 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Restores a snapshot. +// Restore a snapshot. +// Restore a snapshot of a cluster or data streams and indices. +// +// You can restore a snapshot only to a running cluster with an elected master +// node. 
+// The snapshot repository must be registered and available to the cluster. +// The snapshot and cluster versions must be compatible. +// +// To restore a snapshot, the cluster's global metadata must be writable. Ensure +// there aren't any cluster blocks that prevent writes. The restore operation +// ignores index blocks. +// +// Before you restore a data stream, ensure the cluster contains a matching +// index template with data streams enabled. To check, use the index management +// feature in Kibana or the get index template API: +// +// ``` +// GET +// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +// ``` +// +// If no such template exists, you can create one or restore a cluster state +// that contains one. Without a matching index template, a data stream can't +// roll over or create backing indices. +// +// If your snapshot contains data from App Search or Workplace Search, you must +// restore the Enterprise Search encryption key before you restore the snapshot. package restore import ( @@ -86,9 +112,35 @@ func NewRestoreFunc(tp elastictransport.Interface) NewRestore { } } -// Restores a snapshot. +// Restore a snapshot. +// Restore a snapshot of a cluster or data streams and indices. +// +// You can restore a snapshot only to a running cluster with an elected master +// node. +// The snapshot repository must be registered and available to the cluster. +// The snapshot and cluster versions must be compatible. +// +// To restore a snapshot, the cluster's global metadata must be writable. Ensure +// there aren't any cluster blocks that prevent writes. The restore operation +// ignores index blocks. +// +// Before you restore a data stream, ensure the cluster contains a matching +// index template with data streams enabled.
To check, use the index management +// feature in Kibana or the get index template API: +// +// ``` +// GET +// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +// ``` +// +// If no such template exists, you can create one or restore a cluster state +// that contains one. Without a matching index template, a data stream can't +// roll over or create backing indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// If your snapshot contains data from App Search or Workplace Search, you must +// restore the Enterprise Search encryption key before you restore the snapshot. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore func New(tp elastictransport.Interface) *Restore { r := &Restore{ transport: tp, @@ -96,8 +148,6 @@ func New(tp elastictransport.Interface) *Restore { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,7 +367,7 @@ func (r *Restore) Header(key, value string) *Restore { return r } -// Repository A repository name +// Repository The name of the repository to restore a snapshot from. // API Name: repository func (r *Restore) _repository(repository string) *Restore { r.paramSet |= repositoryMask @@ -326,7 +376,7 @@ func (r *Restore) _repository(repository string) *Restore { return r } -// Snapshot A snapshot name +// Snapshot The name of the snapshot to restore. // API Name: snapshot func (r *Restore) _snapshot(snapshot string) *Restore { r.paramSet |= snapshotMask @@ -335,7 +385,10 @@ func (r *Restore) _snapshot(snapshot string) *Restore { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Restore) MasterTimeout(duration string) *Restore { r.values.Set("master_timeout", duration) @@ -343,7 +396,14 @@ func (r *Restore) MasterTimeout(duration string) *Restore { return r } -// WaitForCompletion Should this request wait until the operation has completed before returning +// WaitForCompletion If `true`, the request returns a response when the restore operation +// completes. +// The operation is complete when it finishes all attempts to recover primary +// shards for restored indices. +// This applies even if one or more of the recovery attempts fail. +// +// If `false`, the request returns a response when the restore operation +// initializes. // API name: wait_for_completion func (r *Restore) WaitForCompletion(waitforcompletion bool) *Restore { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -395,73 +455,197 @@ func (r *Restore) Pretty(pretty bool) *Restore { return r } +// The feature states to restore. +// If `include_global_state` is `true`, the request restores all feature states +// in the snapshot by default. +// If `include_global_state` is `false`, the request restores no feature states +// by default. +// Note that specifying an empty array will result in the default behavior. +// To restore no feature states, regardless of the `include_global_state` value, +// specify an array containing only the value `none` (`["none"]`). 
// API name: feature_states func (r *Restore) FeatureStates(featurestates ...string) *Restore { - r.req.FeatureStates = featurestates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + + r.req.FeatureStates = append(r.req.FeatureStates, v) + } return r } +// The index settings to not restore from the snapshot. +// You can't use this option to ignore `index.number_of_shards`. +// +// For data streams, this option applies only to restored backing indices. +// New backing indices are configured using the data stream's matching index +// template. // API name: ignore_index_settings func (r *Restore) IgnoreIndexSettings(ignoreindexsettings ...string) *Restore { - r.req.IgnoreIndexSettings = ignoreindexsettings + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + + } return r } +// If `true`, the request ignores any index or data stream in indices that's +// missing from the snapshot. +// If `false`, the request returns an error for any missing index or data +// stream. // API name: ignore_unavailable func (r *Restore) IgnoreUnavailable(ignoreunavailable bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreUnavailable = &ignoreunavailable return r } +// If `true`, the request restores aliases for any restored data streams and +// indices. +// If `false`, the request doesn’t restore aliases. // API name: include_aliases func (r *Restore) IncludeAliases(includealiases bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeAliases = &includealiases return r } +// If `true`, restore the cluster state. 
The cluster state includes: +// +// * Persistent cluster settings +// * Index templates +// * Legacy index templates +// * Ingest pipelines +// * Index lifecycle management (ILM) policies +// * Stored scripts +// * For snapshots taken after 7.12.0, feature states +// +// If `include_global_state` is `true`, the restore operation merges the legacy +// index templates in your cluster with the templates contained in the snapshot, +// replacing any existing ones whose name matches one in the snapshot. +// It completely removes all persistent settings, non-legacy index templates, +// ingest pipelines, and ILM lifecycle policies that exist in your cluster and +// replaces them with the corresponding items from the snapshot. +// +// Use the `feature_states` parameter to configure how feature states are +// restored. +// +// If `include_global_state` is `true` and a snapshot was created without a +// global state then the restore request will fail. // API name: include_global_state func (r *Restore) IncludeGlobalState(includeglobalstate bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeGlobalState = &includeglobalstate return r } +// Index settings to add or change in restored indices, including backing +// indices. +// You can't use this option to change `index.number_of_shards`. +// +// For data streams, this option applies only to restored backing indices. +// New backing indices are configured using the data stream's matching index +// template. 
// API name: index_settings -func (r *Restore) IndexSettings(indexsettings *types.IndexSettings) *Restore { +func (r *Restore) IndexSettings(indexsettings types.IndexSettingsVariant) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexSettings = indexsettings + r.req.IndexSettings = indexsettings.IndexSettingsCaster() return r } +// A comma-separated list of indices and data streams to restore. +// It supports a multi-target syntax. +// The default behavior is all regular indices and regular data streams in the +// snapshot. +// +// You can't use this parameter to restore system indices or system data +// streams. +// Use `feature_states` instead. // API name: indices func (r *Restore) Indices(indices ...string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r } +// If `false`, the entire restore operation will fail if one or more indices +// included in the snapshot do not have all primary shards available. +// +// If true, it allows restoring a partial snapshot of indices with unavailable +// shards. +// Only shards that were successfully included in the snapshot will be restored. +// All missing shards will be recreated as empty. // API name: partial func (r *Restore) Partial(partial bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Partial = &partial return r } +// A rename pattern to apply to restored data streams and indices. +// Data streams and indices matching the rename pattern will be renamed +// according to `rename_replacement`. +// +// The rename pattern is applied as defined by the regular expression that +// supports referencing the original text, according to the `appendReplacement` +// logic. 
// API name: rename_pattern func (r *Restore) RenamePattern(renamepattern string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenamePattern = &renamepattern return r } +// The rename replacement string that is used with the `rename_pattern`. // API name: rename_replacement func (r *Restore) RenameReplacement(renamereplacement string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenameReplacement = &renamereplacement diff --git a/typedapi/snapshot/status/response.go b/typedapi/snapshot/status/response.go index 3e59301bfa..31af4e1a6c 100644 --- a/typedapi/snapshot/status/response.go +++ b/typedapi/snapshot/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package status @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 type Response struct { Snapshots []types.Status `json:"snapshots"` } diff --git a/typedapi/snapshot/status/status.go b/typedapi/snapshot/status/status.go index 0bca2f9627..d51579ad42 100644 --- a/typedapi/snapshot/status/status.go +++ b/typedapi/snapshot/status/status.go @@ -16,9 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns information about the status of a snapshot. +// Get the snapshot status. +// Get a detailed description of the current state for each shard participating +// in the snapshot. +// +// Note that this API should be used only to obtain detailed shard-level +// information for ongoing snapshots. +// If this detail is not needed or you want to obtain information about one or +// more existing snapshots, use the get snapshot API. +// +// If you omit the `` request path parameter, the request retrieves +// information only for currently running snapshots. +// This usage is preferred. +// If needed, you can specify `` and `` to retrieve +// information for specific snapshots, even if they're not currently running. +// +// WARNING: Using the API to return the status of any snapshots other than +// currently running snapshots can be expensive. +// The API requires a read from the repository for each shard in each snapshot. +// For example, if you have 100 snapshots with 1,000 shards each, an API request +// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 +// shards). +// +// Depending on the latency of your storage, such requests can take an extremely +// long time to return results. +// These requests can also tax machine resources and, when using cloud storage, +// incur high processing costs. package status import ( @@ -77,9 +102,34 @@ func NewStatusFunc(tp elastictransport.Interface) NewStatus { } } -// Returns information about the status of a snapshot. +// Get the snapshot status. +// Get a detailed description of the current state for each shard participating +// in the snapshot. +// +// Note that this API should be used only to obtain detailed shard-level +// information for ongoing snapshots. 
+// If this detail is not needed or you want to obtain information about one or +// more existing snapshots, use the get snapshot API. +// +// If you omit the `` request path parameter, the request retrieves +// information only for currently running snapshots. +// This usage is preferred. +// If needed, you can specify `` and `` to retrieve +// information for specific snapshots, even if they're not currently running. +// +// WARNING: Using the API to return the status of any snapshots other than +// currently running snapshots can be expensive. +// The API requires a read from the repository for each shard in each snapshot. +// For example, if you have 100 snapshots with 1,000 shards each, an API request +// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 +// shards). +// +// Depending on the latency of your storage, such requests can take an extremely +// long time to return results. +// These requests can also tax machine resources and, when using cloud storage, +// incur high processing costs. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status func New(tp elastictransport.Interface) *Status { r := &Status{ transport: tp, @@ -317,7 +367,8 @@ func (r *Status) Header(key, value string) *Status { return r } -// Repository A repository name +// Repository The snapshot repository name used to limit the request. +// It supports wildcards (`*`) if `` isn't specified. // API Name: repository func (r *Status) Repository(repository string) *Status { r.paramSet |= repositoryMask @@ -326,7 +377,9 @@ func (r *Status) Repository(repository string) *Status { return r } -// Snapshot A comma-separated list of snapshot names +// Snapshot A comma-separated list of snapshots to retrieve status for. +// The default is currently running snapshots. +// Wildcards (`*`) are not supported. 
// API Name: snapshot func (r *Status) Snapshot(snapshot string) *Status { r.paramSet |= snapshotMask @@ -335,8 +388,10 @@ func (r *Status) Snapshot(snapshot string) *Status { return r } -// IgnoreUnavailable Whether to ignore unavailable snapshots, defaults to false which means a -// SnapshotMissingException is thrown +// IgnoreUnavailable If `false`, the request returns an error for any snapshots that are +// unavailable. +// If `true`, the request ignores snapshots that are unavailable, such as those +// that are corrupted or temporarily cannot be returned. // API name: ignore_unavailable func (r *Status) IgnoreUnavailable(ignoreunavailable bool) *Status { r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) @@ -344,7 +399,10 @@ func (r *Status) IgnoreUnavailable(ignoreunavailable bool) *Status { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Status) MasterTimeout(duration string) *Status { r.values.Set("master_timeout", duration) diff --git a/typedapi/snapshot/verifyrepository/response.go b/typedapi/snapshot/verifyrepository/response.go index 2bb3fe6576..bbcbd26fb9 100644 --- a/typedapi/snapshot/verifyrepository/response.go +++ b/typedapi/snapshot/verifyrepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package verifyrepository @@ -26,8 +26,11 @@ import ( // Response holds the response body struct for the package verifyrepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L31 type Response struct { + + // Nodes Information about the nodes connected to the snapshot repository. + // The key is the ID of the node. Nodes map[string]types.CompactNodeInfo `json:"nodes"` } diff --git a/typedapi/snapshot/verifyrepository/verify_repository.go b/typedapi/snapshot/verifyrepository/verify_repository.go index 36f5b6e2c6..31d65d01b9 100644 --- a/typedapi/snapshot/verifyrepository/verify_repository.go +++ b/typedapi/snapshot/verifyrepository/verify_repository.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Verifies a repository. +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. package verifyrepository import ( @@ -76,9 +77,10 @@ func NewVerifyRepositoryFunc(tp elastictransport.Interface) NewVerifyRepository } } -// Verifies a repository. +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository func New(tp elastictransport.Interface) *VerifyRepository { r := &VerifyRepository{ transport: tp, @@ -290,7 +292,7 @@ func (r *VerifyRepository) Header(key, value string) *VerifyRepository { return r } -// Repository A repository name +// Repository The name of the snapshot repository to verify. // API Name: repository func (r *VerifyRepository) _repository(repository string) *VerifyRepository { r.paramSet |= repositoryMask @@ -299,7 +301,10 @@ func (r *VerifyRepository) _repository(repository string) *VerifyRepository { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *VerifyRepository) MasterTimeout(duration string) *VerifyRepository { r.values.Set("master_timeout", duration) @@ -307,7 +312,12 @@ func (r *VerifyRepository) MasterTimeout(duration string) *VerifyRepository { return r } -// Timeout Explicit operation timeout +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. 
// API name: timeout func (r *VerifyRepository) Timeout(duration string) *VerifyRepository { r.values.Set("timeout", duration) diff --git a/typedapi/sql/clearcursor/clear_cursor.go b/typedapi/sql/clearcursor/clear_cursor.go index b709819c6b..8059778539 100644 --- a/typedapi/sql/clearcursor/clear_cursor.go +++ b/typedapi/sql/clearcursor/clear_cursor.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Clears the SQL cursor +// Clear an SQL search cursor. package clearcursor import ( @@ -73,9 +73,9 @@ func NewClearCursorFunc(tp elastictransport.Interface) NewClearCursor { } } -// Clears the SQL cursor +// Clear an SQL search cursor. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor func New(tp elastictransport.Interface) *ClearCursor { r := &ClearCursor{ transport: tp, @@ -83,8 +83,6 @@ func New(tp elastictransport.Interface) *ClearCursor { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -336,9 +334,13 @@ func (r *ClearCursor) Pretty(pretty bool) *ClearCursor { return r } -// Cursor Cursor to clear. +// Cursor to clear. 
// API name: cursor func (r *ClearCursor) Cursor(cursor string) *ClearCursor { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cursor = cursor diff --git a/typedapi/sql/clearcursor/request.go b/typedapi/sql/clearcursor/request.go index 84f35ca224..8b2fd23600 100644 --- a/typedapi/sql/clearcursor/request.go +++ b/typedapi/sql/clearcursor/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcursor @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L42 type Request struct { // Cursor Cursor to clear. diff --git a/typedapi/sql/clearcursor/response.go b/typedapi/sql/clearcursor/response.go index 4633970597..d1aabbbe04 100644 --- a/typedapi/sql/clearcursor/response.go +++ b/typedapi/sql/clearcursor/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package clearcursor // Response holds the response body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 type Response struct { Succeeded bool `json:"succeeded"` } diff --git a/typedapi/sql/deleteasync/delete_async.go b/typedapi/sql/deleteasync/delete_async.go index d285e5d596..f965435394 100644 --- a/typedapi/sql/deleteasync/delete_async.go +++ b/typedapi/sql/deleteasync/delete_async.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes an async SQL search or a stored synchronous SQL search. If the search -// is still running, the API cancels it. +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. package deleteasync import ( @@ -77,10 +84,17 @@ func NewDeleteAsyncFunc(tp elastictransport.Interface) NewDeleteAsync { } } -// Deletes an async SQL search or a stored synchronous SQL search. 
If the search -// is still running, the API cancels it. +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async func New(tp elastictransport.Interface) *DeleteAsync { r := &DeleteAsync{ transport: tp, @@ -294,7 +308,7 @@ func (r *DeleteAsync) Header(key, value string) *DeleteAsync { return r } -// Id Identifier for the search. +// Id The identifier for the search. // API Name: id func (r *DeleteAsync) _id(id string) *DeleteAsync { r.paramSet |= idMask diff --git a/typedapi/sql/deleteasync/response.go b/typedapi/sql/deleteasync/response.go index 7d3c3ae344..3be6d29b23 100644 --- a/typedapi/sql/deleteasync/response.go +++ b/typedapi/sql/deleteasync/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deleteasync // Response holds the response body struct for the package deleteasync // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/sql/getasync/get_async.go b/typedapi/sql/getasync/get_async.go index 86a190809a..c38bbe2c54 100644 --- a/typedapi/sql/getasync/get_async.go +++ b/typedapi/sql/getasync/get_async.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the current status and available results for an async SQL search or -// stored synchronous SQL search +// Get async SQL search results. +// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. package getasync import ( @@ -77,10 +81,14 @@ func NewGetAsyncFunc(tp elastictransport.Interface) NewGetAsync { } } -// Returns the current status and available results for an async SQL search or -// stored synchronous SQL search +// Get async SQL search results. 
+// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async func New(tp elastictransport.Interface) *GetAsync { r := &GetAsync{ transport: tp, @@ -292,7 +300,7 @@ func (r *GetAsync) Header(key, value string) *GetAsync { return r } -// Id Identifier for the search. +// Id The identifier for the search. // API Name: id func (r *GetAsync) _id(id string) *GetAsync { r.paramSet |= idMask @@ -301,8 +309,8 @@ func (r *GetAsync) _id(id string) *GetAsync { return r } -// Delimiter Separator for CSV results. The API only supports this parameter for CSV -// responses. +// Delimiter The separator for CSV results. +// The API supports this parameter only for CSV responses. // API name: delimiter func (r *GetAsync) Delimiter(delimiter string) *GetAsync { r.values.Set("delimiter", delimiter) @@ -310,9 +318,9 @@ func (r *GetAsync) Delimiter(delimiter string) *GetAsync { return r } -// Format Format for the response. You must specify a format using this parameter or -// the -// Accept HTTP header. If you specify both, the API uses this parameter. +// Format The format for the response. +// You must specify a format using this parameter or the `Accept` HTTP header. +// If you specify both, the API uses this parameter. // API name: format func (r *GetAsync) Format(format string) *GetAsync { r.values.Set("format", format) @@ -320,8 +328,8 @@ func (r *GetAsync) Format(format string) *GetAsync { return r } -// KeepAlive Retention period for the search and its results. Defaults -// to the `keep_alive` period for the original SQL search. +// KeepAlive The retention period for the search and its results. 
+// It defaults to the `keep_alive` period for the original SQL search. // API name: keep_alive func (r *GetAsync) KeepAlive(duration string) *GetAsync { r.values.Set("keep_alive", duration) @@ -329,8 +337,9 @@ func (r *GetAsync) KeepAlive(duration string) *GetAsync { return r } -// WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, -// meaning the request waits for complete search results. +// WaitForCompletionTimeout The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. // API name: wait_for_completion_timeout func (r *GetAsync) WaitForCompletionTimeout(duration string) *GetAsync { r.values.Set("wait_for_completion_timeout", duration) diff --git a/typedapi/sql/getasync/response.go b/typedapi/sql/getasync/response.go index 398a43b37c..36493c91fd 100644 --- a/typedapi/sql/getasync/response.go +++ b/typedapi/sql/getasync/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getasync @@ -28,35 +28,36 @@ import ( // Response holds the response body struct for the package getasync // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 type Response struct { // Columns Column headings for the search results. Each object is a column. Columns []types.Column `json:"columns,omitempty"` - // Cursor Cursor for the next set of paginated results. 
For CSV, TSV, and - // TXT responses, this value is returned in the `Cursor` HTTP header. + // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. Cursor *string `json:"cursor,omitempty"` - // Id Identifier for the search. This value is only returned for async and saved - // synchronous searches. For CSV, TSV, and TXT responses, this value is returned - // in the `Async-ID` HTTP header. + // Id Identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. Id string `json:"id"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. This value is only returned for async and saved synchronous - // searches. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. // For CSV, TSV, and TXT responses, this value is returned in the // `Async-partial` HTTP header. IsPartial bool `json:"is_partial"` - // IsRunning If `true`, the search is still running. If false, the search has finished. - // This value is only returned for async and saved synchronous searches. For - // CSV, TSV, and TXT responses, this value is returned in the `Async-partial` - // HTTP header. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. 
+ // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. IsRunning bool `json:"is_running"` - // Rows Values for the search results. + // Rows The values for the search results. Rows [][]json.RawMessage `json:"rows"` } diff --git a/typedapi/sql/getasyncstatus/get_async_status.go b/typedapi/sql/getasyncstatus/get_async_status.go index 4be663b726..c20f75425e 100644 --- a/typedapi/sql/getasyncstatus/get_async_status.go +++ b/typedapi/sql/getasyncstatus/get_async_status.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Returns the current status of an async SQL search or a stored synchronous SQL -// search +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. package getasyncstatus import ( @@ -77,10 +78,11 @@ func NewGetAsyncStatusFunc(tp elastictransport.Interface) NewGetAsyncStatus { } } -// Returns the current status of an async SQL search or a stored synchronous SQL -// search +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status func New(tp elastictransport.Interface) *GetAsyncStatus { r := &GetAsyncStatus{ transport: tp, @@ -294,7 +296,7 @@ func (r *GetAsyncStatus) Header(key, value string) *GetAsyncStatus { return r } -// Id Identifier for the search. +// Id The identifier for the search. 
// API Name: id func (r *GetAsyncStatus) _id(id string) *GetAsyncStatus { r.paramSet |= idMask diff --git a/typedapi/sql/getasyncstatus/response.go b/typedapi/sql/getasyncstatus/response.go index b9d7d50e86..48058170b9 100644 --- a/typedapi/sql/getasyncstatus/response.go +++ b/typedapi/sql/getasyncstatus/response.go @@ -16,36 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getasyncstatus // Response holds the response body struct for the package getasyncstatus // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 type Response struct { - // CompletionStatus HTTP status code for the search. The API only returns this property for - // completed searches. + // CompletionStatus The HTTP status code for the search. + // The API returns this property only for completed searches. CompletionStatus *uint `json:"completion_status,omitempty"` - // ExpirationTimeInMillis Timestamp, in milliseconds since the Unix epoch, when Elasticsearch will - // delete - // the search and its results, even if the search is still running. + // ExpirationTimeInMillis The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will + // delete the search and its results, even if the search is still running. ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` - // Id Identifier for the search. + // Id The identifier for the search. 
Id string `json:"id"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. IsPartial bool `json:"is_partial"` - // IsRunning If `true`, the search is still running. If `false`, the search has finished. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. IsRunning bool `json:"is_running"` - // StartTimeInMillis Timestamp, in milliseconds since the Unix epoch, when the search started. - // The API only returns this property for running searches. + // StartTimeInMillis The timestamp, in milliseconds since the Unix epoch, when the search started. + // The API returns this property only for running searches. StartTimeInMillis int64 `json:"start_time_in_millis"` } diff --git a/typedapi/sql/query/query.go b/typedapi/sql/query/query.go index ef80432a78..1609faec35 100644 --- a/typedapi/sql/query/query.go +++ b/typedapi/sql/query/query.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Executes a SQL request +// Get SQL search results. +// Run an SQL request. package query import ( @@ -74,9 +75,10 @@ func NewQueryFunc(tp elastictransport.Interface) NewQuery { } } -// Executes a SQL request +// Get SQL search results. 
+// Run an SQL request. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query func New(tp elastictransport.Interface) *Query { r := &Query{ transport: tp, @@ -84,8 +86,6 @@ func New(tp elastictransport.Interface) *Query { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -291,7 +291,10 @@ func (r *Query) Header(key, value string) *Query { return r } -// Format Format for the response. +// Format The format for the response. +// You can also specify a format using the `Accept` HTTP header. +// If you specify both this parameter and the `Accept` HTTP header, this +// parameter takes precedence. // API name: format func (r *Query) Format(format sqlformat.SqlFormat) *Query { r.values.Set("format", format.String()) @@ -343,147 +346,266 @@ func (r *Query) Pretty(pretty bool) *Query { return r } -// Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on -// the data in the local cluster only. +// If `true`, the response has partial results when there are shard request +// timeouts or shard failures. +// If `false`, the API returns an error with no partial results. +// API name: allow_partial_search_results +func (r *Query) AllowPartialSearchResults(allowpartialsearchresults bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// The default catalog (cluster) for queries. +// If unspecified, the queries execute on the data in the local cluster only. 
// API name: catalog func (r *Query) Catalog(catalog string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Catalog = &catalog return r } -// Columnar If true, the results in a columnar fashion: one row represents all the values -// of a certain column from the current page of results. +// If `true`, the results are in a columnar fashion: one row represents all the +// values of a certain column from the current page of results. +// The API supports this parameter only for CBOR, JSON, SMILE, and YAML +// responses. // API name: columnar func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Columnar = &columnar return r } -// Cursor Cursor used to retrieve a set of paginated results. +// The cursor used to retrieve a set of paginated results. // If you specify a cursor, the API only uses the `columnar` and `time_zone` // request body parameters. // It ignores other request body parameters. // API name: cursor func (r *Query) Cursor(cursor string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cursor = &cursor return r } -// FetchSize The maximum number of rows (or entries) to return in one response +// The maximum number of rows (or entries) to return in one response. // API name: fetch_size func (r *Query) FetchSize(fetchsize int) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FetchSize = &fetchsize return r } -// FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or -// be lenient and return the first value from the list (without any guarantees -// of what that will be - typically the first in natural ascending order). 
+// If `false`, the API returns an exception when encountering multiple values +// for a field. +// If `true`, the API is lenient and returns the first value from the array with +// no guarantee of consistent results. // API name: field_multi_value_leniency func (r *Query) FieldMultiValueLeniency(fieldmultivalueleniency bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FieldMultiValueLeniency = &fieldmultivalueleniency return r } -// Filter Elasticsearch query DSL for additional filtering. +// The Elasticsearch query DSL for additional filtering. // API name: filter -func (r *Query) Filter(filter *types.Query) *Query { +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. +// If `true`, the search can run on frozen indices. // API name: index_using_frozen func (r *Query) IndexUsingFrozen(indexusingfrozen bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexUsingFrozen = &indexusingfrozen return r } -// KeepAlive Retention period for an async or saved synchronous search. +// The retention period for an async or saved synchronous search. // API name: keep_alive -func (r *Query) KeepAlive(duration types.Duration) *Query { - r.req.KeepAlive = duration +func (r *Query) KeepAlive(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() return r } -// KeepOnCompletion If true, Elasticsearch stores synchronous searches if you also specify the -// wait_for_completion_timeout parameter. 
If false, Elasticsearch only stores -// async searches that don’t finish before the wait_for_completion_timeout. +// If `true`, Elasticsearch stores synchronous searches if you also specify the +// `wait_for_completion_timeout` parameter. +// If `false`, Elasticsearch only stores async searches that don't finish before +// the `wait_for_completion_timeout`. // API name: keep_on_completion func (r *Query) KeepOnCompletion(keeponcompletion bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.KeepOnCompletion = &keeponcompletion return r } -// PageTimeout The timeout before a pagination request fails. +// The minimum retention period for the scroll cursor. +// After this time period, a pagination request might fail because the scroll +// cursor is no longer available. +// Subsequent scroll requests prolong the lifetime of the scroll cursor by the +// duration of `page_timeout` in the scroll request. // API name: page_timeout -func (r *Query) PageTimeout(duration types.Duration) *Query { - r.req.PageTimeout = duration +func (r *Query) PageTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PageTimeout = *duration.DurationCaster() return r } -// Params Values for parameters in the query. +// The values for parameters in the query. 
// API name: params func (r *Query) Params(params map[string]json.RawMessage) *Query { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} + +func (r *Query) AddParam(key string, value json.RawMessage) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + tmp[key] = value + + r.req.Params = tmp return r } -// Query SQL query to run. +// The SQL query to run. // API name: query func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = &query return r } -// RequestTimeout The timeout before the request fails. +// The timeout before the request fails. // API name: request_timeout -func (r *Query) RequestTimeout(duration types.Duration) *Query { - r.req.RequestTimeout = duration +func (r *Query) RequestTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RequestTimeout = *duration.DurationCaster() return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take -// precedence over mapped fields with the same name. +// One or more runtime fields for the search request. +// These fields take precedence over mapped fields with the same name. 
// API name: runtime_mappings -func (r *Query) RuntimeMappings(runtimefields types.RuntimeFields) *Query { - r.req.RuntimeMappings = runtimefields +func (r *Query) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// TimeZone ISO-8601 time zone ID for the search. +// The ISO-8601 time zone ID for the search. // API name: time_zone func (r *Query) TimeZone(timezone string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimeZone = &timezone return r } -// WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the -// request waits for complete search results. If the search doesn’t finish -// within this period, the search becomes async. +// The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. +// If the search doesn't finish within this period, the search becomes async. +// +// To save a synchronous search, you must specify this parameter and the +// `keep_on_completion` parameter. // API name: wait_for_completion_timeout -func (r *Query) WaitForCompletionTimeout(duration types.Duration) *Query { - r.req.WaitForCompletionTimeout = duration +func (r *Query) WaitForCompletionTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/sql/query/request.go b/typedapi/sql/query/request.go index decc4ffe3c..5506afc5d3 100644 --- a/typedapi/sql/query/request.go +++ b/typedapi/sql/query/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package query @@ -33,52 +33,68 @@ import ( // Request holds the request body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/query/QuerySqlRequest.ts#L28-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/query/QuerySqlRequest.ts#L28-L152 type Request struct { - // Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on - // the data in the local cluster only. + // AllowPartialSearchResults If `true`, the response has partial results when there are shard request + // timeouts or shard failures. + // If `false`, the API returns an error with no partial results. + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // Catalog The default catalog (cluster) for queries. + // If unspecified, the queries execute on the data in the local cluster only. Catalog *string `json:"catalog,omitempty"` - // Columnar If true, the results in a columnar fashion: one row represents all the values - // of a certain column from the current page of results. + // Columnar If `true`, the results are in a columnar fashion: one row represents all the + // values of a certain column from the current page of results. + // The API supports this parameter only for CBOR, JSON, SMILE, and YAML + // responses. Columnar *bool `json:"columnar,omitempty"` - // Cursor Cursor used to retrieve a set of paginated results. + // Cursor The cursor used to retrieve a set of paginated results. // If you specify a cursor, the API only uses the `columnar` and `time_zone` // request body parameters. 
// It ignores other request body parameters. Cursor *string `json:"cursor,omitempty"` - // FetchSize The maximum number of rows (or entries) to return in one response + // FetchSize The maximum number of rows (or entries) to return in one response. FetchSize *int `json:"fetch_size,omitempty"` - // FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or - // be lenient and return the first value from the list (without any guarantees - // of what that will be - typically the first in natural ascending order). + // FieldMultiValueLeniency If `false`, the API returns an exception when encountering multiple values + // for a field. + // If `true`, the API is lenient and returns the first value from the array with + // no guarantee of consistent results. FieldMultiValueLeniency *bool `json:"field_multi_value_leniency,omitempty"` - // Filter Elasticsearch query DSL for additional filtering. + // Filter The Elasticsearch query DSL for additional filtering. Filter *types.Query `json:"filter,omitempty"` - // IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. + // IndexUsingFrozen If `true`, the search can run on frozen indices. IndexUsingFrozen *bool `json:"index_using_frozen,omitempty"` - // KeepAlive Retention period for an async or saved synchronous search. + // KeepAlive The retention period for an async or saved synchronous search. KeepAlive types.Duration `json:"keep_alive,omitempty"` - // KeepOnCompletion If true, Elasticsearch stores synchronous searches if you also specify the - // wait_for_completion_timeout parameter. If false, Elasticsearch only stores - // async searches that don’t finish before the wait_for_completion_timeout. + // KeepOnCompletion If `true`, Elasticsearch stores synchronous searches if you also specify the + // `wait_for_completion_timeout` parameter. 
+ // If `false`, Elasticsearch only stores async searches that don't finish before + // the `wait_for_completion_timeout`. KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` - // PageTimeout The timeout before a pagination request fails. + // PageTimeout The minimum retention period for the scroll cursor. + // After this time period, a pagination request might fail because the scroll + // cursor is no longer available. + // Subsequent scroll requests prolong the lifetime of the scroll cursor by the + // duration of `page_timeout` in the scroll request. PageTimeout types.Duration `json:"page_timeout,omitempty"` - // Params Values for parameters in the query. + // Params The values for parameters in the query. Params map[string]json.RawMessage `json:"params,omitempty"` - // Query SQL query to run. + // Query The SQL query to run. Query *string `json:"query,omitempty"` // RequestTimeout The timeout before the request fails. RequestTimeout types.Duration `json:"request_timeout,omitempty"` - // RuntimeMappings Defines one or more runtime fields in the search request. These fields take - // precedence over mapped fields with the same name. + // RuntimeMappings One or more runtime fields for the search request. + // These fields take precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` - // TimeZone ISO-8601 time zone ID for the search. + // TimeZone The ISO-8601 time zone ID for the search. TimeZone *string `json:"time_zone,omitempty"` - // WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the - // request waits for complete search results. If the search doesn’t finish - // within this period, the search becomes async. + // WaitForCompletionTimeout The period to wait for complete results. + // It defaults to no timeout, meaning the request waits for complete search + // results. 
+ // If the search doesn't finish within this period, the search becomes async. + // + // To save a synchronous search, you must specify this parameter and the + // `keep_on_completion` parameter. WaitForCompletionTimeout types.Duration `json:"wait_for_completion_timeout,omitempty"` } @@ -117,6 +133,20 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + case "catalog": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/sql/query/response.go b/typedapi/sql/query/response.go index ac7618fd0c..5473417199 100644 --- a/typedapi/sql/query/response.go +++ b/typedapi/sql/query/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package query @@ -28,35 +28,36 @@ import ( // Response holds the response body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/query/QuerySqlResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/query/QuerySqlResponse.ts#L23-L60 type Response struct { // Columns Column headings for the search results. Each object is a column. Columns []types.Column `json:"columns,omitempty"` - // Cursor Cursor for the next set of paginated results. 
For CSV, TSV, and - // TXT responses, this value is returned in the `Cursor` HTTP header. + // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. Cursor *string `json:"cursor,omitempty"` - // Id Identifier for the search. This value is only returned for async and saved - // synchronous searches. For CSV, TSV, and TXT responses, this value is returned - // in the `Async-ID` HTTP header. + // Id The identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. Id *string `json:"id,omitempty"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. This value is only returned for async and saved synchronous - // searches. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. // For CSV, TSV, and TXT responses, this value is returned in the // `Async-partial` HTTP header. IsPartial *bool `json:"is_partial,omitempty"` - // IsRunning If `true`, the search is still running. If false, the search has finished. - // This value is only returned for async and saved synchronous searches. For - // CSV, TSV, and TXT responses, this value is returned in the `Async-partial` - // HTTP header. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. 
+ // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. IsRunning *bool `json:"is_running,omitempty"` - // Rows Values for the search results. + // Rows The values for the search results. Rows [][]json.RawMessage `json:"rows"` } diff --git a/typedapi/sql/translate/request.go b/typedapi/sql/translate/request.go index 92d9d092a9..9ad889e55f 100644 --- a/typedapi/sql/translate/request.go +++ b/typedapi/sql/translate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package translate @@ -33,16 +33,16 @@ import ( // Request holds the request body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/translate/TranslateSqlRequest.ts#L25-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/translate/TranslateSqlRequest.ts#L25-L65 type Request struct { // FetchSize The maximum number of rows (or entries) to return in one response. FetchSize *int `json:"fetch_size,omitempty"` - // Filter Elasticsearch query DSL for additional filtering. + // Filter The Elasticsearch query DSL for additional filtering. Filter *types.Query `json:"filter,omitempty"` - // Query SQL query to run. + // Query The SQL query to run. Query string `json:"query"` - // TimeZone ISO-8601 time zone ID for the search. + // TimeZone The ISO-8601 time zone ID for the search. 
TimeZone *string `json:"time_zone,omitempty"` } diff --git a/typedapi/sql/translate/response.go b/typedapi/sql/translate/response.go index 0e3ece5ada..81f78fcd9f 100644 --- a/typedapi/sql/translate/response.go +++ b/typedapi/sql/translate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package translate @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/translate/TranslateSqlResponse.ts#L27-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/translate/TranslateSqlResponse.ts#L27-L37 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Fields []types.FieldAndFormat `json:"fields,omitempty"` diff --git a/typedapi/sql/translate/translate.go b/typedapi/sql/translate/translate.go index f0be7303bf..50734219dc 100644 --- a/typedapi/sql/translate/translate.go +++ b/typedapi/sql/translate/translate.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Translates SQL into Elasticsearch queries +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. 
package translate import ( @@ -73,9 +76,12 @@ func NewTranslateFunc(tp elastictransport.Interface) NewTranslate { } } -// Translates SQL into Elasticsearch queries +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate func New(tp elastictransport.Interface) *Translate { r := &Translate{ transport: tp, @@ -83,8 +89,6 @@ func New(tp elastictransport.Interface) *Translate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -336,35 +340,53 @@ func (r *Translate) Pretty(pretty bool) *Translate { return r } -// FetchSize The maximum number of rows (or entries) to return in one response. +// The maximum number of rows (or entries) to return in one response. // API name: fetch_size func (r *Translate) FetchSize(fetchsize int) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FetchSize = &fetchsize return r } -// Filter Elasticsearch query DSL for additional filtering. +// The Elasticsearch query DSL for additional filtering. // API name: filter -func (r *Translate) Filter(filter *types.Query) *Translate { +func (r *Translate) Filter(filter types.QueryVariant) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// Query SQL query to run. +// The SQL query to run. 
// API name: query func (r *Translate) Query(query string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query return r } -// TimeZone ISO-8601 time zone ID for the search. +// The ISO-8601 time zone ID for the search. // API name: time_zone func (r *Translate) TimeZone(timezone string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimeZone = &timezone return r diff --git a/typedapi/ssl/certificates/certificates.go b/typedapi/ssl/certificates/certificates.go index 19443ca1a1..2581f72d48 100644 --- a/typedapi/ssl/certificates/certificates.go +++ b/typedapi/ssl/certificates/certificates.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get SSL certificates. // @@ -128,7 +128,7 @@ func NewCertificatesFunc(tp elastictransport.Interface) NewCertificates { // output includes all certificates in that store, even though some of the // certificates might not be in active use within the cluster. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates func New(tp elastictransport.Interface) *Certificates { r := &Certificates{ transport: tp, diff --git a/typedapi/ssl/certificates/response.go b/typedapi/ssl/certificates/response.go index 9326aee2a1..cbd7e2a785 100644 --- a/typedapi/ssl/certificates/response.go +++ b/typedapi/ssl/certificates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package certificates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package certificates // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 type Response []types.CertificateInformation diff --git a/typedapi/synonyms/deletesynonym/delete_synonym.go b/typedapi/synonyms/deletesynonym/delete_synonym.go index 29b160536b..747f95befa 100644 --- a/typedapi/synonyms/deletesynonym/delete_synonym.go +++ b/typedapi/synonyms/deletesynonym/delete_synonym.go @@ -16,9 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a synonym set +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. +// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. 
+// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. +// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. package deletesynonym import ( @@ -76,9 +101,34 @@ func NewDeleteSynonymFunc(tp elastictransport.Interface) NewDeleteSynonym { } } -// Deletes a synonym set +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. +// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. +// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. 
+// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonyms-set.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym func New(tp elastictransport.Interface) *DeleteSynonym { r := &DeleteSynonym{ transport: tp, @@ -288,7 +338,7 @@ func (r *DeleteSynonym) Header(key, value string) *DeleteSynonym { return r } -// Id The id of the synonyms set to be deleted +// Id The synonyms set identifier to delete. // API Name: id func (r *DeleteSynonym) _id(id string) *DeleteSynonym { r.paramSet |= idMask diff --git a/typedapi/synonyms/deletesynonym/response.go b/typedapi/synonyms/deletesynonym/response.go index a8513e7b44..e056d23200 100644 --- a/typedapi/synonyms/deletesynonym/response.go +++ b/typedapi/synonyms/deletesynonym/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletesynonym // Response holds the response body struct for the package deletesynonym // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go b/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go index e3636cde90..b132470042 100644 --- a/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go +++ b/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deletes a synonym rule in a synonym set +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. package deletesynonymrule import ( @@ -81,9 +82,10 @@ func NewDeleteSynonymRuleFunc(tp elastictransport.Interface) NewDeleteSynonymRul } } -// Deletes a synonym rule in a synonym set +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonym-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule func New(tp elastictransport.Interface) *DeleteSynonymRule { r := &DeleteSynonymRule{ transport: tp, @@ -305,7 +307,7 @@ func (r *DeleteSynonymRule) Header(key, value string) *DeleteSynonymRule { return r } -// SetId The id of the synonym set to be updated +// SetId The ID of the synonym set to update. // API Name: setid func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { r.paramSet |= setidMask @@ -314,7 +316,7 @@ func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { return r } -// RuleId The id of the synonym rule to be deleted +// RuleId The ID of the synonym rule to delete. 
// API Name: ruleid func (r *DeleteSynonymRule) _ruleid(ruleid string) *DeleteSynonymRule { r.paramSet |= ruleidMask diff --git a/typedapi/synonyms/deletesynonymrule/response.go b/typedapi/synonyms/deletesynonymrule/response.go index 59e67ba05d..39a6deeab5 100644 --- a/typedapi/synonyms/deletesynonymrule/response.go +++ b/typedapi/synonyms/deletesynonymrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletesynonymrule @@ -27,13 +27,13 @@ import ( // Response holds the response body struct for the package deletesynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L24 type Response struct { // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. - // This is the analyzers reloading result + // This information is the analyzers reloading result. ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result + // Result The update operation result. Result result.Result `json:"result"` } diff --git a/typedapi/synonyms/getsynonym/get_synonym.go b/typedapi/synonyms/getsynonym/get_synonym.go index e99b7dc320..afd5103b9e 100644 --- a/typedapi/synonyms/getsynonym/get_synonym.go +++ b/typedapi/synonyms/getsynonym/get_synonym.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves a synonym set +// Get a synonym set. package getsynonym import ( @@ -76,9 +76,9 @@ func NewGetSynonymFunc(tp elastictransport.Interface) NewGetSynonym { } } -// Retrieves a synonym set +// Get a synonym set. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym func New(tp elastictransport.Interface) *GetSynonym { r := &GetSynonym{ transport: tp, @@ -288,7 +288,7 @@ func (r *GetSynonym) Header(key, value string) *GetSynonym { return r } -// Id "The id of the synonyms set to be retrieved +// Id The synonyms set identifier to retrieve. // API Name: id func (r *GetSynonym) _id(id string) *GetSynonym { r.paramSet |= idMask @@ -297,7 +297,7 @@ func (r *GetSynonym) _id(id string) *GetSynonym { return r } -// From Starting offset for query rules to be retrieved +// From The starting offset for query rules to retrieve. // API name: from func (r *GetSynonym) From(from int) *GetSynonym { r.values.Set("from", strconv.Itoa(from)) @@ -305,7 +305,7 @@ func (r *GetSynonym) From(from int) *GetSynonym { return r } -// Size specifies a max number of query rules to retrieve +// Size The max number of query rules to retrieve. // API name: size func (r *GetSynonym) Size(size int) *GetSynonym { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/synonyms/getsynonym/response.go b/typedapi/synonyms/getsynonym/response.go index 88a9c24449..dac15ae3ef 100644 --- a/typedapi/synonyms/getsynonym/response.go +++ b/typedapi/synonyms/getsynonym/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsynonym @@ -26,9 +26,12 @@ import ( // Response holds the response body struct for the package getsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of synonyms rules that the synonyms set contains. + Count int `json:"count"` + // SynonymsSet Synonym rule details. SynonymsSet []types.SynonymRuleRead `json:"synonyms_set"` } diff --git a/typedapi/synonyms/getsynonymrule/get_synonym_rule.go b/typedapi/synonyms/getsynonymrule/get_synonym_rule.go index 0daafd65ad..a33fd75320 100644 --- a/typedapi/synonyms/getsynonymrule/get_synonym_rule.go +++ b/typedapi/synonyms/getsynonymrule/get_synonym_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves a synonym rule from a synonym set +// Get a synonym rule. +// Get a synonym rule from a synonym set. package getsynonymrule import ( @@ -81,9 +82,10 @@ func NewGetSynonymRuleFunc(tp elastictransport.Interface) NewGetSynonymRule { } } -// Retrieves a synonym rule from a synonym set +// Get a synonym rule. +// Get a synonym rule from a synonym set. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonym-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule func New(tp elastictransport.Interface) *GetSynonymRule { r := &GetSynonymRule{ transport: tp, @@ -305,7 +307,7 @@ func (r *GetSynonymRule) Header(key, value string) *GetSynonymRule { return r } -// SetId The id of the synonym set to retrieve the synonym rule from +// SetId The ID of the synonym set to retrieve the synonym rule from. // API Name: setid func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { r.paramSet |= setidMask @@ -314,7 +316,7 @@ func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { return r } -// RuleId The id of the synonym rule to retrieve +// RuleId The ID of the synonym rule to retrieve. // API Name: ruleid func (r *GetSynonymRule) _ruleid(ruleid string) *GetSynonymRule { r.paramSet |= ruleidMask diff --git a/typedapi/synonyms/getsynonymrule/response.go b/typedapi/synonyms/getsynonymrule/response.go index 70e17bd8b5..d51000c2bc 100644 --- a/typedapi/synonyms/getsynonymrule/response.go +++ b/typedapi/synonyms/getsynonymrule/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsynonymrule // Response holds the response body struct for the package getsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L24 type Response struct { // Id Synonym Rule identifier diff --git a/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go b/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go index 8b196f0cb4..9dfcb0e20c 100644 --- a/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go +++ b/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves a summary of all defined synonym sets +// Get all synonym sets. +// Get a summary of all defined synonym sets. package getsynonymssets import ( @@ -68,9 +69,10 @@ func NewGetSynonymsSetsFunc(tp elastictransport.Interface) NewGetSynonymsSets { } } -// Retrieves a summary of all defined synonym sets +// Get all synonym sets. +// Get a summary of all defined synonym sets. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-synonyms-sets.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym func New(tp elastictransport.Interface) *GetSynonymsSets { r := &GetSynonymsSets{ transport: tp, @@ -274,7 +276,7 @@ func (r *GetSynonymsSets) Header(key, value string) *GetSynonymsSets { return r } -// From Starting offset +// From The starting offset for synonyms sets to retrieve. // API name: from func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { r.values.Set("from", strconv.Itoa(from)) @@ -282,7 +284,7 @@ func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { return r } -// Size specifies a max number of results to get +// Size The maximum number of synonyms sets to retrieve. // API name: size func (r *GetSynonymsSets) Size(size int) *GetSynonymsSets { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/synonyms/getsynonymssets/response.go b/typedapi/synonyms/getsynonymssets/response.go index bbda42121c..759c22e12e 100644 --- a/typedapi/synonyms/getsynonymssets/response.go +++ b/typedapi/synonyms/getsynonymssets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getsynonymssets @@ -26,9 +26,13 @@ import ( // Response holds the response body struct for the package getsynonymssets // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of synonyms sets defined. + Count int `json:"count"` + // Results The identifier and total number of defined synonym rules for each synonyms + // set. Results []types.SynonymsSetItem `json:"results"` } diff --git a/typedapi/synonyms/putsynonym/put_synonym.go b/typedapi/synonyms/putsynonym/put_synonym.go index 7d0398f310..468031273a 100644 --- a/typedapi/synonyms/putsynonym/put_synonym.go +++ b/typedapi/synonyms/putsynonym/put_synonym.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a synonym set. +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. 
+// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. package putsynonym import ( @@ -81,9 +89,17 @@ func NewPutSynonymFunc(tp elastictransport.Interface) NewPutSynonym { } } -// Creates or updates a synonym set. +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. +// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonyms-set.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym func New(tp elastictransport.Interface) *PutSynonym { r := &PutSynonym{ transport: tp, @@ -91,8 +107,6 @@ func New(tp elastictransport.Interface) *PutSynonym { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -304,7 +318,7 @@ func (r *PutSynonym) Header(key, value string) *PutSynonym { return r } -// Id The id of the synonyms set to be created or updated +// Id The ID of the synonyms set to be created or updated. // API Name: id func (r *PutSynonym) _id(id string) *PutSynonym { r.paramSet |= idMask @@ -357,10 +371,17 @@ func (r *PutSynonym) Pretty(pretty bool) *PutSynonym { return r } -// SynonymsSet The synonym set information to update +// The synonym rules definitions for the synonyms set. 
// API name: synonyms_set -func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRule) *PutSynonym { - r.req.SynonymsSet = synonymssets +func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRuleVariant) *PutSynonym { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SynonymsSet = make([]types.SynonymRule, len(synonymssets)) + for i, v := range synonymssets { + r.req.SynonymsSet[i] = *v.SynonymRuleCaster() + } return r } diff --git a/typedapi/synonyms/putsynonym/request.go b/typedapi/synonyms/putsynonym/request.go index f4e7d1a6eb..0fee0914f8 100644 --- a/typedapi/synonyms/putsynonym/request.go +++ b/typedapi/synonyms/putsynonym/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsynonym @@ -32,10 +32,10 @@ import ( // Request holds the request body struct for the package putsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L55 type Request struct { - // SynonymsSet The synonym set information to update + // SynonymsSet The synonym rules definitions for the synonyms set. SynonymsSet []types.SynonymRule `json:"synonyms_set"` } diff --git a/typedapi/synonyms/putsynonym/response.go b/typedapi/synonyms/putsynonym/response.go index bd2de0a98c..87ca504d1c 100644 --- a/typedapi/synonyms/putsynonym/response.go +++ b/typedapi/synonyms/putsynonym/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsynonym @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package putsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L23-L28 type Response struct { ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` Result result.Result `json:"result"` diff --git a/typedapi/synonyms/putsynonymrule/put_synonym_rule.go b/typedapi/synonyms/putsynonymrule/put_synonym_rule.go index e4e199d3c1..4096ec8246 100644 --- a/typedapi/synonyms/putsynonymrule/put_synonym_rule.go +++ b/typedapi/synonyms/putsynonymrule/put_synonym_rule.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates or updates a synonym rule in a synonym set +// Create or update a synonym rule. +// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. 
package putsynonymrule import ( @@ -86,9 +92,15 @@ func NewPutSynonymRuleFunc(tp elastictransport.Interface) NewPutSynonymRule { } } -// Creates or updates a synonym rule in a synonym set +// Create or update a synonym rule. +// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonym-rule.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule func New(tp elastictransport.Interface) *PutSynonymRule { r := &PutSynonymRule{ transport: tp, @@ -96,8 +108,6 @@ func New(tp elastictransport.Interface) *PutSynonymRule { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,7 +325,7 @@ func (r *PutSynonymRule) Header(key, value string) *PutSynonymRule { return r } -// SetId The id of the synonym set to be updated with the synonym rule +// SetId The ID of the synonym set. // API Name: setid func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { r.paramSet |= setidMask @@ -324,7 +334,7 @@ func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { return r } -// RuleId The id of the synonym rule to be updated or created +// RuleId The ID of the synonym rule to be updated or created. // API Name: ruleid func (r *PutSynonymRule) _ruleid(ruleid string) *PutSynonymRule { r.paramSet |= ruleidMask @@ -377,8 +387,14 @@ func (r *PutSynonymRule) Pretty(pretty bool) *PutSynonymRule { return r } +// The synonym rule information definition, which must be in Solr format. 
// API name: synonyms func (r *PutSynonymRule) Synonyms(synonymstring string) *PutSynonymRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Synonyms = synonymstring return r diff --git a/typedapi/synonyms/putsynonymrule/request.go b/typedapi/synonyms/putsynonymrule/request.go index dae00f5f95..ff999ac6ee 100644 --- a/typedapi/synonyms/putsynonymrule/request.go +++ b/typedapi/synonyms/putsynonymrule/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsynonymrule @@ -30,8 +30,10 @@ import ( // Request holds the request body struct for the package putsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L60 type Request struct { + + // Synonyms The synonym rule information definition, which must be in Solr format. Synonyms string `json:"synonyms"` } diff --git a/typedapi/synonyms/putsynonymrule/response.go b/typedapi/synonyms/putsynonymrule/response.go index f3312920ae..2bd58037c9 100644 --- a/typedapi/synonyms/putsynonymrule/response.go +++ b/typedapi/synonyms/putsynonymrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putsynonymrule @@ -27,13 +27,13 @@ import ( // Response holds the response body struct for the package putsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L24 type Response struct { // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. - // This is the analyzers reloading result + // This information is the analyzers reloading result. ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result + // Result The update operation result. Result result.Result `json:"result"` } diff --git a/typedapi/tasks/cancel/cancel.go b/typedapi/tasks/cancel/cancel.go index 31cc945920..704bee3b6a 100644 --- a/typedapi/tasks/cancel/cancel.go +++ b/typedapi/tasks/cancel/cancel.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Cancels a task, if it can be cancelled through an API. +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. 
+// +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. +// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. +// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. package cancel import ( @@ -74,9 +93,28 @@ func NewCancelFunc(tp elastictransport.Interface) NewCancel { } } -// Cancels a task, if it can be cancelled through an API. +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. 
+// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. +// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks func New(tp elastictransport.Interface) *Cancel { r := &Cancel{ transport: tp, @@ -295,7 +333,7 @@ func (r *Cancel) Header(key, value string) *Cancel { return r } -// TaskId ID of the task. +// TaskId The task identifier. // API Name: taskid func (r *Cancel) TaskId(taskid string) *Cancel { r.paramSet |= taskidMask @@ -304,8 +342,8 @@ func (r *Cancel) TaskId(taskid string) *Cancel { return r } -// Actions Comma-separated list or wildcard expression of actions used to limit the -// request. +// Actions A comma-separated list or wildcard expression of actions that is used to +// limit the request. // API name: actions func (r *Cancel) Actions(actions ...string) *Cancel { tmp := []string{} @@ -317,7 +355,8 @@ func (r *Cancel) Actions(actions ...string) *Cancel { return r } -// Nodes Comma-separated list of node IDs or names used to limit the request. +// Nodes A comma-separated list of node IDs or names that is used to limit the +// request. // API name: nodes func (r *Cancel) Nodes(nodes ...string) *Cancel { tmp := []string{} @@ -329,7 +368,7 @@ func (r *Cancel) Nodes(nodes ...string) *Cancel { return r } -// ParentTaskId Parent task ID used to limit the tasks. +// ParentTaskId A parent task ID that is used to limit the tasks. 
// API name: parent_task_id func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { r.values.Set("parent_task_id", parenttaskid) @@ -337,8 +376,7 @@ func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { return r } -// WaitForCompletion Should the request block until the cancellation of the task and its -// descendant tasks is completed. Defaults to false +// WaitForCompletion If true, the request blocks until all found tasks are complete. // API name: wait_for_completion func (r *Cancel) WaitForCompletion(waitforcompletion bool) *Cancel { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) diff --git a/typedapi/tasks/cancel/response.go b/typedapi/tasks/cancel/response.go index 252bb119ac..df417ac452 100644 --- a/typedapi/tasks/cancel/response.go +++ b/typedapi/tasks/cancel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package cancel @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package cancel // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/tasks/get/get.go b/typedapi/tasks/get/get.go index db2d7dc66e..da5220a12d 100644 --- a/typedapi/tasks/get/get.go +++ b/typedapi/tasks/get/get.go @@ -16,10 +16,17 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get task information. -// Returns information about the tasks currently executing in the cluster. +// Get information about a task currently running in the cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. package get import ( @@ -78,9 +85,16 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get task information. -// Returns information about the tasks currently executing in the cluster. +// Get information about a task currently running in the cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -290,7 +304,7 @@ func (r *Get) Header(key, value string) *Get { return r } -// TaskId ID of the task. +// TaskId The task identifier. // API Name: taskid func (r *Get) _taskid(taskid string) *Get { r.paramSet |= taskidMask @@ -299,7 +313,7 @@ func (r *Get) _taskid(taskid string) *Get { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. 
// If no response is received before the timeout expires, the request fails and // returns an error. // API name: timeout diff --git a/typedapi/tasks/get/response.go b/typedapi/tasks/get/response.go index c12bbcf1f9..af921cb96f 100644 --- a/typedapi/tasks/get/response.go +++ b/typedapi/tasks/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package get @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/get/GetTaskResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/get/GetTaskResponse.ts#L24-L31 type Response struct { Completed bool `json:"completed"` Error *types.ErrorCause `json:"error,omitempty"` diff --git a/typedapi/tasks/list/list.go b/typedapi/tasks/list/list.go index 47ecff3dce..6fa541473c 100644 --- a/typedapi/tasks/list/list.go +++ b/typedapi/tasks/list/list.go @@ -16,10 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// The task management API returns information about tasks currently executing -// on one or more nodes in the cluster. +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. 
+// The API may change in ways that are not backwards compatible. +// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. +// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` +// +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. 
package list import ( @@ -70,10 +137,77 @@ func NewListFunc(tp elastictransport.Interface) NewList { } } -// The task management API returns information about tasks currently executing -// on one or more nodes in the cluster. +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. +// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this 
example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks func New(tp elastictransport.Interface) *List { r := &List{ transport: tp, @@ -277,8 +411,9 @@ func (r *List) Header(key, value string) *List { return r } -// Actions Comma-separated list or wildcard expression of actions used to limit the +// Actions A comma-separated list or wildcard expression of actions used to limit the // request. +// For example, you can use `cluster:*` to retrieve all cluster-related tasks. // API name: actions func (r *List) Actions(actions ...string) *List { tmp := []string{} @@ -290,7 +425,10 @@ func (r *List) Actions(actions ...string) *List { return r } -// Detailed If `true`, the response includes detailed information about shard recoveries. +// Detailed If `true`, the response includes detailed information about the running +// tasks. +// This information is useful to distinguish tasks from each other but is more +// costly to run. // API name: detailed func (r *List) Detailed(detailed bool) *List { r.values.Set("detailed", strconv.FormatBool(detailed)) @@ -298,7 +436,8 @@ func (r *List) Detailed(detailed bool) *List { return r } -// GroupBy Key used to group tasks in the response. +// GroupBy A key that is used to group tasks in the response. +// The task lists can be grouped either by nodes or by parent tasks. // API name: group_by func (r *List) GroupBy(groupby groupby.GroupBy) *List { r.values.Set("group_by", groupby.String()) @@ -306,20 +445,18 @@ func (r *List) GroupBy(groupby groupby.GroupBy) *List { return r } -// NodeId Comma-separated list of node IDs or names used to limit returned information. 
-// API name: node_id -func (r *List) NodeId(nodeids ...string) *List { - tmp := []string{} - for _, item := range nodeids { - tmp = append(tmp, fmt.Sprintf("%v", item)) - } - r.values.Set("node_id", strings.Join(tmp, ",")) +// Nodes A comma-separated list of node IDs or names that is used to limit the +// returned information. +// API name: nodes +func (r *List) Nodes(nodeids ...string) *List { + r.values.Set("nodes", strings.Join(nodeids, ",")) return r } -// ParentTaskId Parent task ID used to limit returned information. To return all tasks, omit -// this parameter or use a value of `-1`. +// ParentTaskId A parent task identifier that is used to limit returned information. +// To return all tasks, omit this parameter or use a value of `-1`. +// If the parent task is not found, the API does not return a 404 response code. // API name: parent_task_id func (r *List) ParentTaskId(id string) *List { r.values.Set("parent_task_id", id) @@ -327,17 +464,10 @@ func (r *List) ParentTaskId(id string) *List { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *List) MasterTimeout(duration string) *List { - r.values.Set("master_timeout", duration) - - return r -} - -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for each node to respond. +// If a node does not respond before its timeout expires, the response does not +// include its information. +// However, timed out nodes are included in the `node_failures` property. 
// API name: timeout func (r *List) Timeout(duration string) *List { r.values.Set("timeout", duration) diff --git a/typedapi/tasks/list/response.go b/typedapi/tasks/list/response.go index 41a8167bef..3472bf3a09 100644 --- a/typedapi/tasks/list/response.go +++ b/typedapi/tasks/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package list @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/list/ListTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/list/ListTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/textstructure/findfieldstructure/find_field_structure.go b/typedapi/textstructure/findfieldstructure/find_field_structure.go index 495eb134ad..a34d67c4c3 100644 --- a/typedapi/textstructure/findfieldstructure/find_field_structure.go +++ b/typedapi/textstructure/findfieldstructure/find_field_structure.go @@ -16,21 +16,52 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Finds the structure of a text field in an index. +// Find the structure of a text field. 
+// Find the structure of a text field in an Elasticsearch index. +// +// This API provides a starting point for extracting further information from +// log messages already ingested into Elasticsearch. +// For example, if you have ingested data into a very simple index that has just +// `@timestamp` and message fields, you can use this API to see what common +// structure exists in the message field. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. package findfieldstructure import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,9 +96,35 @@ func NewFindFieldStructureFunc(tp elastictransport.Interface) NewFindFieldStruct } } -// Finds the structure of a text field in an index. 
+// Find the structure of a text field. +// Find the structure of a text field in an Elasticsearch index. +// +// This API provides a starting point for extracting further information from +// log messages already ingested into Elasticsearch. +// For example, if you have ingested data into a very simple index that has just +// `@timestamp` and message fields, you can use this API to see what common +// structure exists in the message field. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure func New(tp elastictransport.Interface) *FindFieldStructure { r := &FindFieldStructure{ transport: tp, @@ -174,8 +231,57 @@ func (r FindFieldStructure) Perform(providedCtx context.Context) (*http.Response } // Do runs the request through the transport, handle the response and returns a findfieldstructure.Response -func (r FindFieldStructure) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r FindFieldStructure) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_field_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -223,3 +329,268 @@ func (r *FindFieldStructure) Header(key, value string) *FindFieldStructure { return r } + +// ColumnNames If `format` is set to `delimited`, you can specify the column names in a +// comma-separated list. +// If this parameter is not specified, the structure finder uses the column +// names from the header row of the text. +// If the text does not have a header row, columns are named "column1", +// "column2", "column3", for example. +// API name: column_names +func (r *FindFieldStructure) ColumnNames(columnnames string) *FindFieldStructure { + r.values.Set("column_names", columnnames) + + return r +} + +// Delimiter If you have set `format` to `delimited`, you can specify the character used +// to delimit the values in each row. +// Only a single character is supported; the delimiter cannot have multiple +// characters. +// By default, the API considers the following possibilities: comma, tab, +// semi-colon, and pipe (`|`). +// In this default scenario, all rows must have the same number of fields for +// the delimited format to be detected. +// If you specify a delimiter, up to 10% of the rows can have a different number +// of columns than the first row. +// API name: delimiter +func (r *FindFieldStructure) Delimiter(delimiter string) *FindFieldStructure { + r.values.Set("delimiter", delimiter) + + return r +} + +// DocumentsToSample The number of documents to include in the structural analysis. +// The minimum value is 2. +// API name: documents_to_sample +func (r *FindFieldStructure) DocumentsToSample(documentstosample string) *FindFieldStructure { + r.values.Set("documents_to_sample", documentstosample) + + return r +} + +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. 
+// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. +// If the structure finder identifies a common structure but has no idea of the +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output. +// The intention in that situation is that a user who knows the meanings will +// rename the fields before using them. +// API name: ecs_compatibility +func (r *FindFieldStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindFieldStructure { + r.values.Set("ecs_compatibility", ecscompatibility.String()) + + return r +} + +// Explain If `true`, the response includes a field named `explanation`, which is an +// array of strings that indicate how the structure finder produced its result. +// API name: explain +func (r *FindFieldStructure) Explain(explain bool) *FindFieldStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Field The field that should be analyzed. +// API name: field +func (r *FindFieldStructure) Field(field string) *FindFieldStructure { + r.values.Set("field", field) + + return r +} + +// Format The high level structure of the text. +// By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. +// If the format is set to delimited and the delimiter is not set, however, the +// API tolerates up to 5% of rows that have a different number of columns than +// the first row. +// API name: format +func (r *FindFieldStructure) Format(format formattype.FormatType) *FindFieldStructure { + r.values.Set("format", format.String()) + + return r +} + +// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that +// is used to extract fields from every message in the text. 
+// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. +// API name: grok_pattern +func (r *FindFieldStructure) GrokPattern(grokpattern string) *FindFieldStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// Index The name of the index that contains the analyzed field. +// API name: index +func (r *FindFieldStructure) Index(indexname string) *FindFieldStructure { + r.values.Set("index", indexname) + + return r +} + +// Quote If the format is `delimited`, you can specify the character used to quote the +// values in each row if they contain newlines or the delimiter character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. +// API name: quote +func (r *FindFieldStructure) Quote(quote string) *FindFieldStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If the format is `delimited`, you can specify whether values between +// delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is true. +// Otherwise, the default value is `false`. +// API name: should_trim_fields +func (r *FindFieldStructure) ShouldTrimFields(shouldtrimfields bool) *FindFieldStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires, it will be +// stopped. 
+// API name: timeout +func (r *FindFieldStructure) Timeout(duration string) *FindFieldStructure { + r.values.Set("timeout", duration) + + return r +} + +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text was ingested into an index, this is the field that +// would be used to populate the `@timestamp` field. +// +// If the format is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. +// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. +// API name: timestamp_field +func (r *FindFieldStructure) TimestampField(field string) *FindFieldStructure { + r.values.Set("timestamp_field", field) + + return r +} + +// TimestampFormat The Java time format of the timestamp field in the text. +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and are separated from the `ss` by +// a period (`.`), comma (`,`), or colon (`:`). +// Spacing and punctuation is also permitted with the exception a question mark +// (`?`), newline, and carriage return, together with literal text enclosed in +// single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
+//
+// One valuable use case for this parameter is when the format is
+// semi-structured text, there are multiple timestamp formats in the text, and
+// you know which format corresponds to the primary timestamp, but you do not
+// want to specify the full `grok_pattern`.
+// Another is when the timestamp format is one that the structure finder does
+// not consider by default.
+//
+// If this parameter is not specified, the structure finder chooses the best
+// format from a built-in set.
+//
+// If the special value `null` is specified, the structure finder will not look
+// for a primary timestamp in the text.
+// When the format is semi-structured text, this will result in the structure
+// finder treating the text as single-line messages.
+// API name: timestamp_format
+func (r *FindFieldStructure) TimestampFormat(timestampformat string) *FindFieldStructure {
+	r.values.Set("timestamp_format", timestampformat)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *FindFieldStructure) ErrorTrace(errortrace bool) *FindFieldStructure {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *FindFieldStructure) FilterPath(filterpaths ...string) *FindFieldStructure {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human +func (r *FindFieldStructure) Human(human bool) *FindFieldStructure { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FindFieldStructure) Pretty(pretty bool) *FindFieldStructure { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/textstructure/findfieldstructure/response.go b/typedapi/textstructure/findfieldstructure/response.go new file mode 100644 index 0000000000..66aa6b53b2 --- /dev/null +++ b/typedapi/textstructure/findfieldstructure/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package findfieldstructure + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findfieldstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/find_field_structure/FindFieldStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/typedapi/textstructure/findmessagestructure/find_message_structure.go b/typedapi/textstructure/findmessagestructure/find_message_structure.go index a42d5ac962..9942a34021 100644 --- a/typedapi/textstructure/findmessagestructure/find_message_structure.go +++ 
b/typedapi/textstructure/findmessagestructure/find_message_structure.go @@ -16,22 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Finds the structure of a list of messages. The messages must contain data -// that is suitable to be ingested into Elasticsearch. +// Find the structure of text messages. +// Find the structure of a list of text messages. +// The messages must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Use this API rather than the find text structure API if your input text has +// already been split up into separate messages by some other process. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. 
package findmessagestructure import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +79,10 @@ type FindMessageStructure struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,15 +103,44 @@ func NewFindMessageStructureFunc(tp elastictransport.Interface) NewFindMessageSt } } -// Finds the structure of a list of messages. The messages must contain data -// that is suitable to be ingested into Elasticsearch. +// Find the structure of text messages. +// Find the structure of a list of text messages. +// The messages must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Use this API rather than the find text structure API if your input text has +// already been split up into separate messages by some other process. +// +// The response from the API contains: // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. 
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure
 func New(tp elastictransport.Interface) *FindMessageStructure {
 	r := &FindMessageStructure{
 		transport: tp,
 		values:    make(url.Values),
 		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
 	}
 
 	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
@@ -86,6 +152,21 @@ func New(tp elastictransport.Interface) *FindMessageStructure {
 	return r
 }
 
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *FindMessageStructure) Raw(raw io.Reader) *FindMessageStructure {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *FindMessageStructure) Request(req *Request) *FindMessageStructure {
+	r.req = req
+
+	return r
+}
+
 // HttpRequest returns the http.Request object built from the
 // given parameters.
func (r *FindMessageStructure) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +176,31 @@ func (r *FindMessageStructure) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for FindMessageStructure: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +288,7 @@ func (r FindMessageStructure) Perform(providedCtx context.Context) (*http.Respon } // Do runs the request through the transport, handle the response and returns a findmessagestructure.Response -func (r FindMessageStructure) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r FindMessageStructure) IsSuccess(providedCtx context.Context) (bool, error) { +func (r FindMessageStructure) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +299,46 @@ func (r FindMessageStructure) IsSuccess(providedCtx context.Context) (bool, erro ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the FindMessageStructure query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the FindMessageStructure headers map. 
@@ -231,3 +347,258 @@ func (r *FindMessageStructure) Header(key, value string) *FindMessageStructure {
 
 	return r
 }
+
+// ColumnNames If the format is `delimited`, you can specify the column names in a
+// comma-separated list.
+// If this parameter is not specified, the structure finder uses the column
+// names from the header row of the text.
+// If the text does not have a header row, columns are named "column1",
+// "column2", "column3", for example.
+// API name: column_names
+func (r *FindMessageStructure) ColumnNames(columnnames string) *FindMessageStructure {
+	r.values.Set("column_names", columnnames)
+
+	return r
+}
+
+// Delimiter If the format is `delimited`, you can specify the character used to
+// delimit the values in each row.
+// Only a single character is supported; the delimiter cannot have multiple
+// characters.
+// By default, the API considers the following possibilities: comma, tab,
+// semi-colon, and pipe (`|`).
+// In this default scenario, all rows must have the same number of fields for
+// the delimited format to be detected.
+// If you specify a delimiter, up to 10% of the rows can have a different number
+// of columns than the first row.
+// API name: delimiter
+func (r *FindMessageStructure) Delimiter(delimiter string) *FindMessageStructure {
+	r.values.Set("delimiter", delimiter)
+
+	return r
+}
+
+// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns.
+// Use this parameter to specify whether to use ECS Grok patterns instead of
+// legacy ones when the structure finder creates a Grok pattern.
+// This setting primarily has an impact when a whole message Grok pattern such
+// as `%{CATALINALOG}` matches the input.
+// If the structure finder identifies a common structure but has no idea of +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output, with the intention that a +// user who knows the meanings rename these fields before using it. +// API name: ecs_compatibility +func (r *FindMessageStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindMessageStructure { + r.values.Set("ecs_compatibility", ecscompatibility.String()) + + return r +} + +// Explain If this parameter is set to true, the response includes a field named +// `explanation`, which is an array of strings that indicate how the structure +// finder produced its result. +// API name: explain +func (r *FindMessageStructure) Explain(explain bool) *FindMessageStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Format The high level structure of the text. +// By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. +// If the format is `delimited` and the delimiter is not set, however, the API +// tolerates up to 5% of rows that have a different number of columns than the +// first row. +// API name: format +func (r *FindMessageStructure) Format(format formattype.FormatType) *FindMessageStructure { + r.values.Set("format", format.String()) + + return r +} + +// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that +// is used to extract fields from every message in the text. +// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. 
+// API name: grok_pattern +func (r *FindMessageStructure) GrokPattern(grokpattern string) *FindMessageStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// Quote If the format is `delimited`, you can specify the character used to quote the +// values in each row if they contain newlines or the delimiter character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. +// API name: quote +func (r *FindMessageStructure) Quote(quote string) *FindMessageStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If the format is `delimited`, you can specify whether values between +// delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is true. +// Otherwise, the default value is `false`. +// API name: should_trim_fields +func (r *FindMessageStructure) ShouldTrimFields(shouldtrimfields bool) *FindMessageStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires, it will be +// stopped. +// API name: timeout +func (r *FindMessageStructure) Timeout(duration string) *FindMessageStructure { + r.values.Set("timeout", duration) + + return r +} + +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text was ingested into an index, this is the field that +// would be used to populate the `@timestamp` field. +// +// If the format is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. 
+// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. +// API name: timestamp_field +func (r *FindMessageStructure) TimestampField(field string) *FindMessageStructure { + r.values.Set("timestamp_field", field) + + return r +} + +// TimestampFormat The Java time format of the timestamp field in the text. +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and are separated from the `ss` by +// a period (`.`), comma (`,`), or colon (`:`). +// Spacing and punctuation is also permitted with the exception a question mark +// (`?`), newline, and carriage return, together with literal text enclosed in +// single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +// +// One valuable use case for this parameter is when the format is +// semi-structured text, there are multiple timestamp formats in the text, and +// you know which format corresponds to the primary timestamp, but you do not +// want to specify the full `grok_pattern`. +// Another is when the timestamp format is one that the structure finder does +// not consider by default. +// +// If this parameter is not specified, the structure finder chooses the best +// format from a built-in set. 
+//
+// If the special value `null` is specified, the structure finder will not look
+// for a primary timestamp in the text.
+// When the format is semi-structured text, this will result in the structure
+// finder treating the text as single-line messages.
+// API name: timestamp_format
+func (r *FindMessageStructure) TimestampFormat(timestampformat string) *FindMessageStructure {
+	r.values.Set("timestamp_format", timestampformat)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *FindMessageStructure) ErrorTrace(errortrace bool) *FindMessageStructure {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *FindMessageStructure) FilterPath(filterpaths ...string) *FindMessageStructure {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *FindMessageStructure) Human(human bool) *FindMessageStructure {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *FindMessageStructure) Pretty(pretty bool) *FindMessageStructure {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The list of messages you want to analyze.
+// API name: messages +func (r *FindMessageStructure) Messages(messages ...string) *FindMessageStructure { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range messages { + + r.req.Messages = append(r.req.Messages, v) + + } + return r +} diff --git a/typedapi/textstructure/findmessagestructure/request.go b/typedapi/textstructure/findmessagestructure/request.go new file mode 100644 index 0000000000..4b0b3ac8ec --- /dev/null +++ b/typedapi/textstructure/findmessagestructure/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package findmessagestructure + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/find_message_structure/FindMessageStructureRequest.ts#L25-L174 +type Request struct { + + // Messages The list of messages you want to analyze. 
+ Messages []string `json:"messages"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Findmessagestructure request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/textstructure/findmessagestructure/response.go b/typedapi/textstructure/findmessagestructure/response.go new file mode 100644 index 0000000000..258629a418 --- /dev/null +++ b/typedapi/textstructure/findmessagestructure/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package findmessagestructure + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/find_message_structure/FindMessageStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/typedapi/textstructure/findstructure/find_structure.go b/typedapi/textstructure/findstructure/find_structure.go index ab86bd6474..fcf9ef3152 100644 --- a/typedapi/textstructure/findstructure/find_structure.go +++ 
b/typedapi/textstructure/findstructure/find_structure.go @@ -16,10 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Finds the structure of a text file. The text file must contain data that is -// suitable to be ingested into Elasticsearch. +// Find the structure of a text file. +// The text file must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Unlike other Elasticsearch endpoints, the data that is posted to this +// endpoint does not need to be UTF-8 encoded and in JSON format. +// It must, however, be text; binary text formats are not currently supported. +// The size is limited to the Elasticsearch HTTP receive buffer size, which +// defaults to 100 Mb. +// +// The response from the API contains: +// +// * A couple of messages from the beginning of the text. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. 
package findstructure import ( @@ -74,10 +99,35 @@ func NewFindStructureFunc(tp elastictransport.Interface) NewFindStructure { } } -// Finds the structure of a text file. The text file must contain data that is -// suitable to be ingested into Elasticsearch. +// Find the structure of a text file. +// The text file must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Unlike other Elasticsearch endpoints, the data that is posted to this +// endpoint does not need to be UTF-8 encoded and in JSON format. +// It must, however, be text; binary text formats are not currently supported. +// The size is limited to the Elasticsearch HTTP receive buffer size, which +// defaults to 100 Mb. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html +// The response from the API contains: +// +// * A couple of messages from the beginning of the text. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure func New(tp elastictransport.Interface) *FindStructure { r := &FindStructure{ transport: tp, @@ -85,8 +135,6 @@ func New(tp elastictransport.Interface) *FindStructure { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -299,9 +347,11 @@ func (r *FindStructure) Header(key, value string) *FindStructure { return r } -// Charset The text’s character set. It must be a character set that is supported by the -// JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or -// EUC-JP. If this parameter is not specified, the structure finder chooses an +// Charset The text's character set. +// It must be a character set that is supported by the JVM that Elasticsearch +// uses. +// For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. +// If this parameter is not specified, the structure finder chooses an // appropriate character set. // API name: charset func (r *FindStructure) Charset(charset string) *FindStructure { @@ -310,11 +360,12 @@ func (r *FindStructure) Charset(charset string) *FindStructure { return r } -// ColumnNames If you have set format to delimited, you can specify the column names in a -// comma-separated list. If this parameter is not specified, the structure -// finder uses the column names from the header row of the text. If the text -// does not have a header role, columns are named "column1", "column2", -// "column3", etc. +// ColumnNames If you have set format to `delimited`, you can specify the column names in a +// comma-separated list. +// If this parameter is not specified, the structure finder uses the column +// names from the header row of the text. +// If the text does not have a header role, columns are named "column1", +// "column2", "column3", for example. 
// API name: column_names func (r *FindStructure) ColumnNames(columnnames string) *FindStructure { r.values.Set("column_names", columnnames) @@ -322,13 +373,16 @@ func (r *FindStructure) ColumnNames(columnnames string) *FindStructure { return r } -// Delimiter If you have set format to delimited, you can specify the character used to -// delimit the values in each row. Only a single character is supported; the -// delimiter cannot have multiple characters. By default, the API considers the -// following possibilities: comma, tab, semi-colon, and pipe (|). In this -// default scenario, all rows must have the same number of fields for the -// delimited format to be detected. If you specify a delimiter, up to 10% of the -// rows can have a different number of columns than the first row. +// Delimiter If you have set `format` to `delimited`, you can specify the character used +// to delimit the values in each row. +// Only a single character is supported; the delimiter cannot have multiple +// characters. +// By default, the API considers the following possibilities: comma, tab, +// semi-colon, and pipe (`|`). +// In this default scenario, all rows must have the same number of fields for +// the delimited format to be detected. +// If you specify a delimiter, up to 10% of the rows can have a different number +// of columns than the first row. // API name: delimiter func (r *FindStructure) Delimiter(delimiter string) *FindStructure { r.values.Set("delimiter", delimiter) @@ -336,8 +390,16 @@ func (r *FindStructure) Delimiter(delimiter string) *FindStructure { return r } -// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns (disabled or v1, -// default: disabled). +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// Valid values are `disabled` and `v1`. 
+// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. +// If the structure finder identifies a common structure but has no idea of +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output, with the intention that a +// user who knows the meanings rename these fields before using it. // API name: ecs_compatibility func (r *FindStructure) EcsCompatibility(ecscompatibility string) *FindStructure { r.values.Set("ecs_compatibility", ecscompatibility) @@ -345,9 +407,11 @@ func (r *FindStructure) EcsCompatibility(ecscompatibility string) *FindStructure return r } -// Explain If this parameter is set to true, the response includes a field named +// Explain If this parameter is set to `true`, the response includes a field named // explanation, which is an array of strings that indicate how the structure // finder produced its result. +// If the structure finder produces unexpected results for some text, use this +// query parameter to help you determine why the returned structure was chosen. // API name: explain func (r *FindStructure) Explain(explain bool) *FindStructure { r.values.Set("explain", strconv.FormatBool(explain)) @@ -355,12 +419,14 @@ func (r *FindStructure) Explain(explain bool) *FindStructure { return r } -// Format The high level structure of the text. Valid values are ndjson, xml, -// delimited, and semi_structured_text. By default, the API chooses the format. +// Format The high level structure of the text. +// Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +// By default, the API chooses the format. // In this default scenario, all rows must have the same number of fields for a -// delimited format to be detected. If the format is set to delimited and the -// delimiter is not set, however, the API tolerates up to 5% of rows that have a -// different number of columns than the first row. 
+// delimited format to be detected. +// If the format is set to `delimited` and the delimiter is not set, however, +// the API tolerates up to 5% of rows that have a different number of columns +// than the first row. // API name: format func (r *FindStructure) Format(format string) *FindStructure { r.values.Set("format", format) @@ -368,12 +434,14 @@ func (r *FindStructure) Format(format string) *FindStructure { return r } -// GrokPattern If you have set format to semi_structured_text, you can specify a Grok -// pattern that is used to extract fields from every message in the text. The -// name of the timestamp field in the Grok pattern must match what is specified -// in the timestamp_field parameter. If that parameter is not specified, the -// name of the timestamp field in the Grok pattern must match "timestamp". If -// grok_pattern is not specified, the structure finder creates a Grok pattern. +// GrokPattern If you have set `format` to `semi_structured_text`, you can specify a Grok +// pattern that is used to extract fields from every message in the text. +// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. // API name: grok_pattern func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { r.values.Set("grok_pattern", grokpattern) @@ -381,10 +449,10 @@ func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { return r } -// HasHeaderRow If you have set format to delimited, you can use this parameter to indicate -// whether the column names are in the first row of the text. If this parameter -// is not specified, the structure finder guesses based on the similarity of the -// first row of the text to other rows. 
+// HasHeaderRow If you have set `format` to `delimited`, you can use this parameter to +// indicate whether the column names are in the first row of the text. +// If this parameter is not specified, the structure finder guesses based on the +// similarity of the first row of the text to other rows. // API name: has_header_row func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { r.values.Set("has_header_row", strconv.FormatBool(hasheaderrow)) @@ -393,10 +461,10 @@ func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { } // LineMergeSizeLimit The maximum number of characters in a message when lines are merged to form -// messages while analyzing semi-structured text. If you have extremely long -// messages you may need to increase this, but be aware that this may lead to -// very long processing times if the way to group lines into messages is -// misdetected. +// messages while analyzing semi-structured text. +// If you have extremely long messages you may need to increase this, but be +// aware that this may lead to very long processing times if the way to group +// lines into messages is misdetected. // API name: line_merge_size_limit func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStructure { r.values.Set("line_merge_size_limit", linemergesizelimit) @@ -405,9 +473,20 @@ func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStruc } // LinesToSample The number of lines to include in the structural analysis, starting from the -// beginning of the text. The minimum is 2; If the value of this parameter is -// greater than the number of lines in the text, the analysis proceeds (as long -// as there are at least two lines in the text) for all of the lines. +// beginning of the text. +// The minimum is 2. 
+// If the value of this parameter is greater than the number of lines in the +// text, the analysis proceeds (as long as there are at least two lines in the +// text) for all of the lines. +// +// NOTE: The number of lines and the variation of the lines affects the speed of +// the analysis. +// For example, if you upload text where the first 1000 lines are all variations +// on the same message, the analysis will find more commonality than would be +// seen with a bigger sample. +// If possible, however, it is more efficient to upload sample text with more +// variety in the first 1000 lines than to request analysis of 100000 lines to +// achieve some variety. // API name: lines_to_sample func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { r.values.Set("lines_to_sample", linestosample) @@ -415,12 +494,14 @@ func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { return r } -// Quote If you have set format to delimited, you can specify the character used to -// quote the values in each row if they contain newlines or the delimiter -// character. Only a single character is supported. If this parameter is not -// specified, the default value is a double quote ("). If your delimited text -// format does not use quoting, a workaround is to set this argument to a -// character that does not appear anywhere in the sample. +// Quote If you have set `format` to `delimited`, you can specify the character used +// to quote the values in each row if they contain newlines or the delimiter +// character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. 
// API name: quote func (r *FindStructure) Quote(quote string) *FindStructure { r.values.Set("quote", quote) @@ -428,10 +509,11 @@ func (r *FindStructure) Quote(quote string) *FindStructure { return r } -// ShouldTrimFields If you have set format to delimited, you can specify whether values between -// delimiters should have whitespace trimmed from them. If this parameter is not -// specified and the delimiter is pipe (|), the default value is true. -// Otherwise, the default value is false. +// ShouldTrimFields If you have set `format` to `delimited`, you can specify whether values +// between delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is `true`. +// Otherwise, the default value is `false`. // API name: should_trim_fields func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) @@ -439,8 +521,9 @@ func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { return r } -// Timeout Sets the maximum amount of time that the structure analysis make take. If the -// analysis is still running when the timeout expires then it will be aborted. +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires then it will be +// stopped. // API name: timeout func (r *FindStructure) Timeout(duration string) *FindStructure { r.values.Set("timeout", duration) @@ -448,7 +531,22 @@ func (r *FindStructure) Timeout(duration string) *FindStructure { return r } -// TimestampField Optional parameter to specify the timestamp field in the file +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text were ingested into an index, this is the field +// that would be used to populate the `@timestamp` field. 
+// +// If the `format` is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. +// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. // API name: timestamp_field func (r *FindStructure) TimestampField(field string) *FindStructure { r.values.Set("timestamp_field", field) @@ -457,6 +555,50 @@ func (r *FindStructure) TimestampField(field string) *FindStructure { } // TimestampFormat The Java time format of the timestamp field in the text. +// +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and separated from the `ss` by a +// `.`, `,` or `:`. +// Spacing and punctuation is also permitted with the exception of `?`, newline +// and carriage return, together with literal text enclosed in single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +// +// One valuable use case for this parameter is when the format is +// semi-structured text, there are multiple timestamp formats in the text, and +// you know which format corresponds to the primary timestamp, but you do not +// want to specify the full `grok_pattern`. +// Another is when the timestamp format is one that the structure finder does +// not consider by default. 
+// +// If this parameter is not specified, the structure finder chooses the best +// format from a built-in set. +// +// If the special value `null` is specified the structure finder will not look +// for a primary timestamp in the text. +// When the format is semi-structured text this will result in the structure +// finder treating the text as single-line messages. // API name: timestamp_format func (r *FindStructure) TimestampFormat(timestampformat string) *FindStructure { r.values.Set("timestamp_format", timestampformat) diff --git a/typedapi/textstructure/findstructure/request.go b/typedapi/textstructure/findstructure/request.go index 20b797cc54..5cf9518c74 100644 --- a/typedapi/textstructure/findstructure/request.go +++ b/typedapi/textstructure/findstructure/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package findstructure @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package findstructure // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L207 type Request = []json.RawMessage // NewRequest returns a Request diff --git a/typedapi/textstructure/findstructure/response.go b/typedapi/textstructure/findstructure/response.go index 362735784e..3c421388e6 100644 --- a/typedapi/textstructure/findstructure/response.go +++ b/typedapi/textstructure/findstructure/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package findstructure @@ -26,30 +26,58 @@ import ( // Response holds the response body struct for the package findstructure // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L97 type Response struct { - Charset string `json:"charset"` - ColumnNames []string `json:"column_names,omitempty"` - Delimiter *string `json:"delimiter,omitempty"` - ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` - Explanation []string `json:"explanation,omitempty"` - FieldStats map[string]types.FieldStat `json:"field_stats"` - Format string `json:"format"` - GrokPattern *string `json:"grok_pattern,omitempty"` - HasByteOrderMarker bool `json:"has_byte_order_marker"` - HasHeaderRow *bool `json:"has_header_row,omitempty"` - IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` - JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` - JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` - Mappings types.TypeMapping `json:"mappings"` - MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` - NeedClientTimezone bool `json:"need_client_timezone"` - NumLinesAnalyzed int `json:"num_lines_analyzed"` - NumMessagesAnalyzed int `json:"num_messages_analyzed"` - Quote *string `json:"quote,omitempty"` - SampleStart string `json:"sample_start"` - ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` - TimestampField *string 
`json:"timestamp_field,omitempty"` + + // Charset The character encoding used to parse the text. + Charset string `json:"charset"` + // ColumnNames If `format` is `delimited`, the `column_names` field lists the column names + // in the order they appear in the sample. + ColumnNames []string `json:"column_names,omitempty"` + Delimiter *string `json:"delimiter,omitempty"` + ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` + Explanation []string `json:"explanation,omitempty"` + // FieldStats The most common values of each field, plus basic numeric statistics for the + // numeric `page_count` field. + // This information may provide clues that the data needs to be cleaned or + // transformed prior to use by other Elastic Stack functionality. + FieldStats map[string]types.FieldStat `json:"field_stats"` + // Format Valid values include `ndjson`, `xml`, `delimited`, and + // `semi_structured_text`. + Format string `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + // HasByteOrderMarker For UTF character encodings, it indicates whether the text begins with a byte + // order marker. + HasByteOrderMarker bool `json:"has_byte_order_marker"` + HasHeaderRow *bool `json:"has_header_row,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + // JavaTimestampFormats The Java time formats recognized in the time fields. + // Elasticsearch mappings and ingest pipelines use this format. + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + // JodaTimestampFormats Information that is used to tell Logstash how to parse timestamps. + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + // Mappings Some suitable mappings for an index into which the data could be ingested. 
+ Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + // NeedClientTimezone If a timestamp format is detected that does not include a timezone, + // `need_client_timezone` is `true`. + // The server that parses the text must therefore be told the correct timezone + // by the client. + NeedClientTimezone bool `json:"need_client_timezone"` + // NumLinesAnalyzed The number of lines of the text that were analyzed. + NumLinesAnalyzed int `json:"num_lines_analyzed"` + // NumMessagesAnalyzed The number of distinct messages the lines contained. + // For NDJSON, this value is the same as `num_lines_analyzed`. + // For other text formats, messages can span several lines. + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + Quote *string `json:"quote,omitempty"` + // SampleStart The first two messages in the text verbatim. + // This may help diagnose parse errors or accidental uploads of the wrong text. + SampleStart string `json:"sample_start"` + ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` + // TimestampField The field considered most likely to be the primary timestamp of each + // document. + TimestampField *string `json:"timestamp_field,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/textstructure/testgrokpattern/request.go b/typedapi/textstructure/testgrokpattern/request.go index bb28a3e029..4f894486d7 100644 --- a/typedapi/textstructure/testgrokpattern/request.go +++ b/typedapi/textstructure/testgrokpattern/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package testgrokpattern @@ -30,12 +30,12 @@ import ( // Request holds the request body struct for the package testgrokpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L59 type Request struct { - // GrokPattern Grok pattern to run on the text. + // GrokPattern The Grok pattern to run on the text. GrokPattern string `json:"grok_pattern"` - // Text Lines of text to run the Grok pattern on. + // Text The lines of text to run the Grok pattern on. Text []string `json:"text"` } diff --git a/typedapi/textstructure/testgrokpattern/response.go b/typedapi/textstructure/testgrokpattern/response.go index 4c472179ca..59ed1eefb5 100644 --- a/typedapi/textstructure/testgrokpattern/response.go +++ b/typedapi/textstructure/testgrokpattern/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package testgrokpattern @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package testgrokpattern // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26 type Response struct { Matches []types.MatchedText `json:"matches"` } diff --git a/typedapi/textstructure/testgrokpattern/test_grok_pattern.go b/typedapi/textstructure/testgrokpattern/test_grok_pattern.go index e4e260322e..778be7f4c3 100644 --- a/typedapi/textstructure/testgrokpattern/test_grok_pattern.go +++ b/typedapi/textstructure/testgrokpattern/test_grok_pattern.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Tests a Grok pattern on some text. +// Test a Grok pattern. +// Test a Grok pattern on one or more lines of text. +// The API indicates whether the lines match the pattern together with the +// offsets and lengths of the matched substrings. package testgrokpattern import ( @@ -73,9 +76,12 @@ func NewTestGrokPatternFunc(tp elastictransport.Interface) NewTestGrokPattern { } } -// Tests a Grok pattern on some text. +// Test a Grok pattern. +// Test a Grok pattern on one or more lines of text. 
+// The API indicates whether the lines match the pattern together with the +// offsets and lengths of the matched substrings. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/test-grok-pattern.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern func New(tp elastictransport.Interface) *TestGrokPattern { r := &TestGrokPattern{ transport: tp, @@ -83,8 +89,6 @@ func New(tp elastictransport.Interface) *TestGrokPattern { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -292,8 +296,10 @@ func (r *TestGrokPattern) Header(key, value string) *TestGrokPattern { return r } -// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns (disabled or v1, -// default: disabled). +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// Valid values are `disabled` and `v1`. // API name: ecs_compatibility func (r *TestGrokPattern) EcsCompatibility(ecscompatibility string) *TestGrokPattern { r.values.Set("ecs_compatibility", ecscompatibility) @@ -345,18 +351,30 @@ func (r *TestGrokPattern) Pretty(pretty bool) *TestGrokPattern { return r } -// GrokPattern Grok pattern to run on the text. +// The Grok pattern to run on the text. // API name: grok_pattern func (r *TestGrokPattern) GrokPattern(grokpattern string) *TestGrokPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GrokPattern = grokpattern return r } -// Text Lines of text to run the Grok pattern on. +// The lines of text to run the Grok pattern on. 
// API name: text func (r *TestGrokPattern) Text(texts ...string) *TestGrokPattern { - r.req.Text = texts + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range texts { + + r.req.Text = append(r.req.Text, v) + } return r } diff --git a/typedapi/transform/deletetransform/delete_transform.go b/typedapi/transform/deletetransform/delete_transform.go index f77b6c4ed8..2b0688edae 100644 --- a/typedapi/transform/deletetransform/delete_transform.go +++ b/typedapi/transform/deletetransform/delete_transform.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Delete a transform. -// Deletes a transform. package deletetransform import ( @@ -78,9 +77,8 @@ func NewDeleteTransformFunc(tp elastictransport.Interface) NewDeleteTransform { } // Delete a transform. -// Deletes a transform. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform func New(tp elastictransport.Interface) *DeleteTransform { r := &DeleteTransform{ transport: tp, diff --git a/typedapi/transform/deletetransform/response.go b/typedapi/transform/deletetransform/response.go index d5a9b52a0f..7e48e6a6e5 100644 --- a/typedapi/transform/deletetransform/response.go +++ b/typedapi/transform/deletetransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletetransform // Response holds the response body struct for the package deletetransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/getnodestats/get_node_stats.go b/typedapi/transform/getnodestats/get_node_stats.go index 4ae294592d..0c5ac2fa1e 100644 --- a/typedapi/transform/getnodestats/get_node_stats.go +++ b/typedapi/transform/getnodestats/get_node_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Retrieves transform usage information for transform nodes. package getnodestats diff --git a/typedapi/transform/gettransform/get_transform.go b/typedapi/transform/gettransform/get_transform.go index 8e36c6dde6..76eb0685b7 100644 --- a/typedapi/transform/gettransform/get_transform.go +++ b/typedapi/transform/gettransform/get_transform.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get transforms. -// Retrieves configuration information for transforms. +// Get configuration information for transforms. package gettransform import ( @@ -76,9 +76,9 @@ func NewGetTransformFunc(tp elastictransport.Interface) NewGetTransform { } // Get transforms. -// Retrieves configuration information for transforms. +// Get configuration information for transforms. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform func New(tp elastictransport.Interface) *GetTransform { r := &GetTransform{ transport: tp, diff --git a/typedapi/transform/gettransform/response.go b/typedapi/transform/gettransform/response.go index 03c1d0c03c..2cca2a20f8 100644 --- a/typedapi/transform/gettransform/response.go +++ b/typedapi/transform/gettransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettransform @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Transforms []types.TransformSummary `json:"transforms"` diff --git a/typedapi/transform/gettransformstats/get_transform_stats.go b/typedapi/transform/gettransformstats/get_transform_stats.go index 0227263114..04ee69c8b3 100644 --- a/typedapi/transform/gettransformstats/get_transform_stats.go +++ b/typedapi/transform/gettransformstats/get_transform_stats.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Get transform stats. -// Retrieves usage information for transforms. +// +// Get usage information for transforms. package gettransformstats import ( @@ -78,9 +79,10 @@ func NewGetTransformStatsFunc(tp elastictransport.Interface) NewGetTransformStat } // Get transform stats. -// Retrieves usage information for transforms. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html +// Get usage information for transforms. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats func New(tp elastictransport.Interface) *GetTransformStats { r := &GetTransformStats{ transport: tp, diff --git a/typedapi/transform/gettransformstats/response.go b/typedapi/transform/gettransformstats/response.go index a671ab1f0d..4eeac553af 100644 --- a/typedapi/transform/gettransformstats/response.go +++ b/typedapi/transform/gettransformstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package gettransformstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransformstats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Transforms []types.TransformStats `json:"transforms"` diff --git a/typedapi/transform/previewtransform/preview_transform.go b/typedapi/transform/previewtransform/preview_transform.go index 7e8011f821..5ab0974604 100644 --- a/typedapi/transform/previewtransform/preview_transform.go +++ b/typedapi/transform/previewtransform/preview_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Preview a transform. // Generates a preview of the results that you will get when you create a @@ -97,7 +97,7 @@ func NewPreviewTransformFunc(tp elastictransport.Interface) NewPreviewTransform // values are determined based on the field // types of the source index and the transform aggregations. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform func New(tp elastictransport.Interface) *PreviewTransform { r := &PreviewTransform{ transport: tp, @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *PreviewTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -391,89 +389,126 @@ func (r *PreviewTransform) Pretty(pretty bool) *PreviewTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. // API name: description func (r *PreviewTransform) Description(description string) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. 
// API name: dest -func (r *PreviewTransform) Dest(dest *types.TransformDestination) *PreviewTransform { +func (r *PreviewTransform) Dest(dest types.TransformDestinationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also determines the retry interval in // the event of transient failures while the transform is searching or // indexing. The minimum value is 1s and the maximum is 1h. // API name: frequency -func (r *PreviewTransform) Frequency(duration types.Duration) *PreviewTransform { - r.req.Frequency = duration +func (r *PreviewTransform) Frequency(duration types.DurationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Latest The latest method transforms the data by finding the latest document for +// The latest method transforms the data by finding the latest document for // each unique key. // API name: latest -func (r *PreviewTransform) Latest(latest *types.Latest) *PreviewTransform { +func (r *PreviewTransform) Latest(latest types.LatestVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Latest = latest + r.req.Latest = latest.LatestCaster() return r } -// Pivot The pivot method transforms the data by aggregating and grouping it. +// The pivot method transforms the data by aggregating and grouping it. // These objects define the group by fields and the aggregation to reduce // the data. 
// API name: pivot -func (r *PreviewTransform) Pivot(pivot *types.Pivot) *PreviewTransform { +func (r *PreviewTransform) Pivot(pivot types.PivotVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pivot = pivot + r.req.Pivot = pivot.PivotCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the destination index. // API name: retention_policy -func (r *PreviewTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PreviewTransform { +func (r *PreviewTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.RetentionPolicy = retentionpolicy + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. // API name: settings -func (r *PreviewTransform) Settings(settings *types.Settings) *PreviewTransform { +func (r *PreviewTransform) Settings(settings types.SettingsVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. 
// API name: source -func (r *PreviewTransform) Source(source *types.TransformSource) *PreviewTransform { +func (r *PreviewTransform) Source(source types.TransformSourceVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. // API name: sync -func (r *PreviewTransform) Sync(sync *types.SyncContainer) *PreviewTransform { +func (r *PreviewTransform) Sync(sync types.SyncContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/previewtransform/request.go b/typedapi/transform/previewtransform/request.go index c28cb93157..009eabdb08 100644 --- a/typedapi/transform/previewtransform/request.go +++ b/typedapi/transform/previewtransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewtransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L119 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/previewtransform/response.go b/typedapi/transform/previewtransform/response.go index 9fc4573230..f85567d9c1 100644 --- a/typedapi/transform/previewtransform/response.go +++ b/typedapi/transform/previewtransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package previewtransform @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 type Response struct { GeneratedDestIndex types.IndexState `json:"generated_dest_index"` Preview []json.RawMessage `json:"preview"` diff --git a/typedapi/transform/puttransform/put_transform.go b/typedapi/transform/puttransform/put_transform.go index 90cf686ac6..b78db1ffed 100644 --- a/typedapi/transform/puttransform/put_transform.go +++ b/typedapi/transform/puttransform/put_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Create a transform. // Creates a transform. @@ -151,7 +151,7 @@ func NewPutTransformFunc(tp elastictransport.Interface) NewPutTransform { // transforms prior to 7.5, also do not // give users any privileges on `.data-frame-internal*` indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform func New(tp elastictransport.Interface) *PutTransform { r := &PutTransform{ transport: tp, @@ -159,8 +159,6 @@ func New(tp elastictransport.Interface) *PutTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -453,99 +451,141 @@ func (r *PutTransform) Pretty(pretty bool) *PutTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. // API name: description func (r *PutTransform) Description(description string) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. // API name: dest -func (r *PutTransform) Dest(dest *types.TransformDestination) *PutTransform { +func (r *PutTransform) Dest(dest types.TransformDestinationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also // determines the retry interval in the event of transient failures while the // transform is searching or indexing. // The minimum value is `1s` and the maximum is `1h`. 
// API name: frequency -func (r *PutTransform) Frequency(duration types.Duration) *PutTransform { - r.req.Frequency = duration +func (r *PutTransform) Frequency(duration types.DurationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Latest The latest method transforms the data by finding the latest document for each +// The latest method transforms the data by finding the latest document for each // unique key. // API name: latest -func (r *PutTransform) Latest(latest *types.Latest) *PutTransform { +func (r *PutTransform) Latest(latest types.LatestVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Latest = latest + r.req.Latest = latest.LatestCaster() return r } -// Meta_ Defines optional transform metadata. +// Defines optional transform metadata. // API name: _meta -func (r *PutTransform) Meta_(metadata types.Metadata) *PutTransform { - r.req.Meta_ = metadata +func (r *PutTransform) Meta_(metadata types.MetadataVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Pivot The pivot method transforms the data by aggregating and grouping it. These +// The pivot method transforms the data by aggregating and grouping it. These // objects define the group by fields // and the aggregation to reduce the data. // API name: pivot -func (r *PutTransform) Pivot(pivot *types.Pivot) *PutTransform { +func (r *PutTransform) Pivot(pivot types.PivotVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pivot = pivot + r.req.Pivot = pivot.PivotCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. 
Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the // destination index. // API name: retention_policy -func (r *PutTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PutTransform { +func (r *PutTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.RetentionPolicy = retentionpolicy + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. // API name: settings -func (r *PutTransform) Settings(settings *types.Settings) *PutTransform { +func (r *PutTransform) Settings(settings types.SettingsVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. // API name: source -func (r *PutTransform) Source(source *types.TransformSource) *PutTransform { +func (r *PutTransform) Source(source types.TransformSourceVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. 
// API name: sync -func (r *PutTransform) Sync(sync *types.SyncContainer) *PutTransform { +func (r *PutTransform) Sync(sync types.SyncContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/puttransform/request.go b/typedapi/transform/puttransform/request.go index cfca5bbe09..ff3b9f3f6b 100644 --- a/typedapi/transform/puttransform/request.go +++ b/typedapi/transform/puttransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/put_transform/PutTransformRequest.ts#L33-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/put_transform/PutTransformRequest.ts#L33-L130 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/puttransform/response.go b/typedapi/transform/puttransform/response.go index e73961f140..0dee933611 100644 --- a/typedapi/transform/puttransform/response.go +++ b/typedapi/transform/puttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package puttransform // Response holds the response body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/resettransform/reset_transform.go b/typedapi/transform/resettransform/reset_transform.go index 8a9f332d66..3a454197e9 100644 --- a/typedapi/transform/resettransform/reset_transform.go +++ b/typedapi/transform/resettransform/reset_transform.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Reset a transform. -// Resets a transform. +// // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. @@ -81,12 +81,12 @@ func NewResetTransformFunc(tp elastictransport.Interface) NewResetTransform { } // Reset a transform. -// Resets a transform. +// // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform func New(tp elastictransport.Interface) *ResetTransform { r := &ResetTransform{ transport: tp, @@ -320,6 +320,15 @@ func (r *ResetTransform) Force(force bool) *ResetTransform { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *ResetTransform) Timeout(duration string) *ResetTransform { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/transform/resettransform/response.go b/typedapi/transform/resettransform/response.go index 7aa16ab350..abc43c644e 100644 --- a/typedapi/transform/resettransform/response.go +++ b/typedapi/transform/resettransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package resettransform // Response holds the response body struct for the package resettransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/transform/schedulenowtransform/response.go b/typedapi/transform/schedulenowtransform/response.go index dbc26ca278..46e2789b6a 100644 --- a/typedapi/transform/schedulenowtransform/response.go +++ b/typedapi/transform/schedulenowtransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package schedulenowtransform // Response holds the response body struct for the package schedulenowtransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L23 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/schedulenowtransform/schedule_now_transform.go b/typedapi/transform/schedulenowtransform/schedule_now_transform.go index 5e9f011c2b..02c2c5e53d 100644 --- a/typedapi/transform/schedulenowtransform/schedule_now_transform.go +++ b/typedapi/transform/schedulenowtransform/schedule_now_transform.go @@ -16,16 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Schedule a transform to start now. -// Instantly runs a transform to process data. 
// -// If you _schedule_now a transform, it will process the new data instantly, -// without waiting for the configured frequency interval. After _schedule_now -// API is called, -// the transform will be processed again at now + frequency unless _schedule_now -// API +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. package schedulenowtransform @@ -85,16 +84,15 @@ func NewScheduleNowTransformFunc(tp elastictransport.Interface) NewScheduleNowTr } // Schedule a transform to start now. -// Instantly runs a transform to process data. // -// If you _schedule_now a transform, it will process the new data instantly, -// without waiting for the configured frequency interval. After _schedule_now -// API is called, -// the transform will be processed again at now + frequency unless _schedule_now -// API +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform func New(tp elastictransport.Interface) *ScheduleNowTransform { r := &ScheduleNowTransform{ transport: tp, diff --git a/typedapi/transform/starttransform/response.go b/typedapi/transform/starttransform/response.go index 7c70044b24..905e24fcb4 100644 --- a/typedapi/transform/starttransform/response.go +++ b/typedapi/transform/starttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package starttransform // Response holds the response body struct for the package starttransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/starttransform/start_transform.go b/typedapi/transform/starttransform/start_transform.go index 23349ca65a..45d1079757 100644 --- a/typedapi/transform/starttransform/start_transform.go +++ b/typedapi/transform/starttransform/start_transform.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Start a transform. -// Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. The `number_of_shards` is @@ -105,7 +104,6 @@ func NewStartTransformFunc(tp elastictransport.Interface) NewStartTransform { } // Start a transform. -// Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. The `number_of_shards` is @@ -134,7 +132,7 @@ func NewStartTransformFunc(tp elastictransport.Interface) NewStartTransform { // destination indices, the transform fails when it attempts unauthorized // operations. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform func New(tp elastictransport.Interface) *StartTransform { r := &StartTransform{ transport: tp, diff --git a/typedapi/transform/stoptransform/response.go b/typedapi/transform/stoptransform/response.go index 7d2a89c8dc..a4433654d3 100644 --- a/typedapi/transform/stoptransform/response.go +++ b/typedapi/transform/stoptransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stoptransform // Response holds the response body struct for the package stoptransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/stoptransform/stop_transform.go b/typedapi/transform/stoptransform/stop_transform.go index ec72dc2a43..942c0cfd97 100644 --- a/typedapi/transform/stoptransform/stop_transform.go +++ b/typedapi/transform/stoptransform/stop_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Stop transforms. // Stops one or more transforms. @@ -80,7 +80,7 @@ func NewStopTransformFunc(tp elastictransport.Interface) NewStopTransform { // Stop transforms. // Stops one or more transforms. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform func New(tp elastictransport.Interface) *StopTransform { r := &StopTransform{ transport: tp, diff --git a/typedapi/transform/updatetransform/request.go b/typedapi/transform/updatetransform/request.go index 6e1af69572..25cd142d32 100644 --- a/typedapi/transform/updatetransform/request.go +++ b/typedapi/transform/updatetransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatetransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L106 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L113 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/updatetransform/response.go b/typedapi/transform/updatetransform/response.go index 57e85eaaa1..bf842b3ca6 100644 --- a/typedapi/transform/updatetransform/response.go +++ b/typedapi/transform/updatetransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package updatetransform @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 type Response struct { Authorization *types.TransformAuthorization `json:"authorization,omitempty"` CreateTime int64 `json:"create_time"` diff --git a/typedapi/transform/updatetransform/update_transform.go b/typedapi/transform/updatetransform/update_transform.go index 8163dbea98..856b66b8c4 100644 --- a/typedapi/transform/updatetransform/update_transform.go +++ b/typedapi/transform/updatetransform/update_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Update a transform. // Updates certain properties of a transform. @@ -105,7 +105,7 @@ func NewUpdateTransformFunc(tp elastictransport.Interface) NewUpdateTransform { // roles the user who updated it had at the // time of update and runs with those privileges. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform func New(tp elastictransport.Interface) *UpdateTransform { r := &UpdateTransform{ transport: tp, @@ -113,8 +113,6 @@ func New(tp elastictransport.Interface) *UpdateTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -400,75 +398,110 @@ func (r *UpdateTransform) Pretty(pretty bool) *UpdateTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. // API name: description func (r *UpdateTransform) Description(description string) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. // API name: dest -func (r *UpdateTransform) Dest(dest *types.TransformDestination) *UpdateTransform { +func (r *UpdateTransform) Dest(dest types.TransformDestinationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also determines the retry interval in // the event of transient failures while the transform is searching or // indexing. The minimum value is 1s and the maximum is 1h. 
// API name: frequency -func (r *UpdateTransform) Frequency(duration types.Duration) *UpdateTransform { - r.req.Frequency = duration +func (r *UpdateTransform) Frequency(duration types.DurationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Meta_ Defines optional transform metadata. +// Defines optional transform metadata. // API name: _meta -func (r *UpdateTransform) Meta_(metadata types.Metadata) *UpdateTransform { - r.req.Meta_ = metadata +func (r *UpdateTransform) Meta_(metadata types.MetadataVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the destination index. // API name: retention_policy -func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainer) *UpdateTransform { - r.req.RetentionPolicy = &retentionpolicy +func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. 
// API name: settings -func (r *UpdateTransform) Settings(settings *types.Settings) *UpdateTransform { +func (r *UpdateTransform) Settings(settings types.SettingsVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. // API name: source -func (r *UpdateTransform) Source(source *types.TransformSource) *UpdateTransform { +func (r *UpdateTransform) Source(source types.TransformSourceVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. // API name: sync -func (r *UpdateTransform) Sync(sync *types.SyncContainer) *UpdateTransform { +func (r *UpdateTransform) Sync(sync types.SyncContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/upgradetransforms/response.go b/typedapi/transform/upgradetransforms/response.go index 396540672b..6fc516676c 100644 --- a/typedapi/transform/upgradetransforms/response.go +++ b/typedapi/transform/upgradetransforms/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package upgradetransforms // Response holds the response body struct for the package upgradetransforms // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 type Response struct { // NeedsUpdate The number of transforms that need to be upgraded. diff --git a/typedapi/transform/upgradetransforms/upgrade_transforms.go b/typedapi/transform/upgradetransforms/upgrade_transforms.go index 6868106134..06ba9d7666 100644 --- a/typedapi/transform/upgradetransforms/upgrade_transforms.go +++ b/typedapi/transform/upgradetransforms/upgrade_transforms.go @@ -16,18 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Upgrades all transforms. +// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. // This API identifies transforms that have a legacy configuration format and -// upgrades them to the latest version. It -// also cleans up the internal data structures that store the transform state -// and checkpoints. The upgrade does not -// affect the source and destination indices. 
The upgrade also does not affect -// the roles that transforms use when +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. +// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source -// data and write to the destination index -// remains unchanged. +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. package upgradetransforms import ( @@ -77,18 +91,32 @@ func NewUpgradeTransformsFunc(tp elastictransport.Interface) NewUpgradeTransform } } -// Upgrades all transforms. +// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. // This API identifies transforms that have a legacy configuration format and -// upgrades them to the latest version. It -// also cleans up the internal data structures that store the transform state -// and checkpoints. The upgrade does not -// affect the source and destination indices. The upgrade also does not affect -// the roles that transforms use when +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. 
+// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source -// data and write to the destination index -// remains unchanged. +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms func New(tp elastictransport.Interface) *UpgradeTransforms { r := &UpgradeTransforms{ transport: tp, diff --git a/typedapi/types/access.go b/typedapi/types/access.go index 8832cd1bb6..234db341fd 100644 --- a/typedapi/types/access.go +++ b/typedapi/types/access.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Access type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Access.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Access.ts#L22-L31 type Access struct { // Replication A list of indices permission entries for cross-cluster replication. Replication []ReplicationAccess `json:"replication,omitempty"` @@ -36,3 +36,13 @@ func NewAccess() *Access { return r } + +// true + +type AccessVariant interface { + AccessCaster() *Access +} + +func (s *Access) AccessCaster() *Access { + return s +} diff --git a/typedapi/types/acknowledgement.go b/typedapi/types/acknowledgement.go index 0e95f94f64..c4e0e8fe06 100644 --- a/typedapi/types/acknowledgement.go +++ b/typedapi/types/acknowledgement.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Acknowledgement type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/post/types.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/post/types.ts#L20-L23 type Acknowledgement struct { License []string `json:"license"` Message string `json:"message"` @@ -80,3 +80,5 @@ func NewAcknowledgement() *Acknowledgement { return r } + +// false diff --git a/typedapi/types/acknowledgestate.go b/typedapi/types/acknowledgestate.go index c419510e66..bbd7505e40 100644 --- a/typedapi/types/acknowledgestate.go +++ b/typedapi/types/acknowledgestate.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // AcknowledgeState type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L109-L112 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L109-L112 type AcknowledgeState struct { State acknowledgementoptions.AcknowledgementOptions `json:"state"` Timestamp DateTime `json:"timestamp"` @@ -74,3 +74,13 @@ func NewAcknowledgeState() *AcknowledgeState { return r } + +// true + +type AcknowledgeStateVariant interface { + AcknowledgeStateCaster() *AcknowledgeState +} + +func (s *AcknowledgeState) AcknowledgeStateCaster() *AcknowledgeState { + return s +} diff --git a/typedapi/types/actionstatus.go b/typedapi/types/actionstatus.go index ac170f8370..c1a838fc47 100644 --- a/typedapi/types/actionstatus.go +++ b/typedapi/types/actionstatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ActionStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L125-L130 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L125-L130 type ActionStatus struct { Ack AcknowledgeState `json:"ack"` LastExecution *ExecutionState `json:"last_execution,omitempty"` @@ -36,3 +36,13 @@ func NewActionStatus() *ActionStatus { return r } + +// true + +type ActionStatusVariant interface { + ActionStatusCaster() *ActionStatus +} + +func (s *ActionStatus) ActionStatusCaster() *ActionStatus { + return s +} diff --git a/typedapi/types/activationstate.go b/typedapi/types/activationstate.go index 7a9b936269..c197809114 100644 --- a/typedapi/types/activationstate.go +++ b/typedapi/types/activationstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ActivationState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Activation.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Activation.ts#L24-L27 type ActivationState struct { Active bool `json:"active"` Timestamp DateTime `json:"timestamp"` @@ -82,3 +82,13 @@ func NewActivationState() *ActivationState { return r } + +// true + +type ActivationStateVariant interface { + ActivationStateCaster() *ActivationState +} + +func (s *ActivationState) ActivationStateCaster() *ActivationState { + return s +} diff --git a/typedapi/types/activationstatus.go b/typedapi/types/activationstatus.go index ddb9823267..eca81f2ea6 100644 --- a/typedapi/types/activationstatus.go +++ b/typedapi/types/activationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ActivationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Activation.ts#L29-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Activation.ts#L29-L33 type ActivationStatus struct { Actions WatcherStatusActions `json:"actions"` State ActivationState `json:"state"` @@ -78,3 +78,5 @@ func NewActivationStatus() *ActivationStatus { return r } + +// false diff --git a/typedapi/types/adaptiveallocationssettings.go b/typedapi/types/adaptiveallocationssettings.go new file mode 100644 index 0000000000..98a94fa02f --- /dev/null +++ b/typedapi/types/adaptiveallocationssettings.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdaptiveAllocationsSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L109-L125 +type AdaptiveAllocationsSettings struct { + // Enabled If true, adaptive_allocations is enabled + Enabled bool `json:"enabled"` + // MaxNumberOfAllocations Specifies the maximum number of allocations to scale to. + // If set, it must be greater than or equal to min_number_of_allocations. + MaxNumberOfAllocations *int `json:"max_number_of_allocations,omitempty"` + // MinNumberOfAllocations Specifies the minimum number of allocations to scale to. + // If set, it must be greater than or equal to 0. + // If not defined, the deployment scales to 0. + MinNumberOfAllocations *int `json:"min_number_of_allocations,omitempty"` +} + +func (s *AdaptiveAllocationsSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "max_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfAllocations", err) + } + s.MaxNumberOfAllocations = &value + case float64: + f := int(v) + s.MaxNumberOfAllocations = &f + } + + case "min_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinNumberOfAllocations", err) + } + s.MinNumberOfAllocations = &value + case float64: + f := int(v) + s.MinNumberOfAllocations = &f + } + + } + } + return nil +} + +// 
NewAdaptiveAllocationsSettings returns a AdaptiveAllocationsSettings. +func NewAdaptiveAllocationsSettings() *AdaptiveAllocationsSettings { + r := &AdaptiveAllocationsSettings{} + + return r +} + +// true + +type AdaptiveAllocationsSettingsVariant interface { + AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings +} + +func (s *AdaptiveAllocationsSettings) AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings { + return s +} diff --git a/typedapi/types/adaptiveselection.go b/typedapi/types/adaptiveselection.go index 036a29837f..fdd81c1fb7 100644 --- a/typedapi/types/adaptiveselection.go +++ b/typedapi/types/adaptiveselection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AdaptiveSelection type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L439-L468 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L439-L468 type AdaptiveSelection struct { // AvgQueueSize The exponentially weighted moving average queue size of search requests on // the keyed node. @@ -163,3 +163,5 @@ func NewAdaptiveSelection() *AdaptiveSelection { return r } + +// false diff --git a/typedapi/types/addaction.go b/typedapi/types/addaction.go index 3068ca822c..6e51ed7f7e 100644 --- a/typedapi/types/addaction.go +++ b/typedapi/types/addaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AddAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/types.ts#L41-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/types.ts#L41-L95 type AddAction struct { // Alias Alias for the action. // Index alias names support date math. @@ -196,3 +196,13 @@ func NewAddAction() *AddAction { return r } + +// true + +type AddActionVariant interface { + AddActionCaster() *AddAction +} + +func (s *AddAction) AddActionCaster() *AddAction { + return s +} diff --git a/typedapi/types/adjacencymatrixaggregate.go b/typedapi/types/adjacencymatrixaggregate.go index ee2e5ed0a4..6227f7610f 100644 --- a/typedapi/types/adjacencymatrixaggregate.go +++ b/typedapi/types/adjacencymatrixaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AdjacencyMatrixAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L647-L652 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L647-L652 type AdjacencyMatrixAggregate struct { Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewAdjacencyMatrixAggregate() *AdjacencyMatrixAggregate { return r } + +// false diff --git a/typedapi/types/adjacencymatrixaggregation.go b/typedapi/types/adjacencymatrixaggregation.go index 2432301fb6..f90efbf4b1 100644 --- a/typedapi/types/adjacencymatrixaggregation.go +++ b/typedapi/types/adjacencymatrixaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AdjacencyMatrixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L60-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L60-L70 type AdjacencyMatrixAggregation struct { // Filters Filters used to create buckets. // At least one filter is required. @@ -83,8 +83,18 @@ func (s *AdjacencyMatrixAggregation) UnmarshalJSON(data []byte) error { // NewAdjacencyMatrixAggregation returns a AdjacencyMatrixAggregation. 
func NewAdjacencyMatrixAggregation() *AdjacencyMatrixAggregation { r := &AdjacencyMatrixAggregation{ - Filters: make(map[string]Query, 0), + Filters: make(map[string]Query), } return r } + +// true + +type AdjacencyMatrixAggregationVariant interface { + AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation +} + +func (s *AdjacencyMatrixAggregation) AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation { + return s +} diff --git a/typedapi/types/adjacencymatrixbucket.go b/typedapi/types/adjacencymatrixbucket.go index a12207cbb1..0125f888b3 100644 --- a/typedapi/types/adjacencymatrixbucket.go +++ b/typedapi/types/adjacencymatrixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // AdjacencyMatrixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L654-L656 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L654-L656 type AdjacencyMatrixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -638,8 +638,10 @@ func (s AdjacencyMatrixBucket) MarshalJSON() ([]byte, error) { // NewAdjacencyMatrixBucket returns a AdjacencyMatrixBucket. 
func NewAdjacencyMatrixBucket() *AdjacencyMatrixBucket { r := &AdjacencyMatrixBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/aggregate.go b/typedapi/types/aggregate.go index 1ac7e05b62..776b3bf922 100644 --- a/typedapi/types/aggregate.go +++ b/typedapi/types/aggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -93,5 +93,5 @@ package types // MatrixStatsAggregate // GeoLineAggregate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L38-L125 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L38-L125 type Aggregate any diff --git a/typedapi/types/aggregatemetricdoubleproperty.go b/typedapi/types/aggregatemetricdoubleproperty.go index a892f8e7e8..a2790b5089 100644 --- a/typedapi/types/aggregatemetricdoubleproperty.go +++ b/typedapi/types/aggregatemetricdoubleproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,23 +29,25 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // AggregateMetricDoubleProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/complex.ts#L51-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/complex.ts#L59-L64 type AggregateMetricDoubleProperty struct { DefaultMetric string `json:"default_metric"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Metrics []string `json:"metrics"` - Properties map[string]Property `json:"properties,omitempty"` - TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Metrics []string `json:"metrics"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` } func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { @@ -99,301 +101,313 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -447,306 +461,323 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := 
NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_metric": if err := dec.Decode(&s.TimeSeriesMetric); err != nil { return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) @@ -766,15 +797,16 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { func (s AggregateMetricDoubleProperty) MarshalJSON() ([]byte, error) { type innerAggregateMetricDoubleProperty AggregateMetricDoubleProperty tmp := innerAggregateMetricDoubleProperty{ - DefaultMetric: s.DefaultMetric, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Metrics: s.Metrics, - Properties: s.Properties, - TimeSeriesMetric: s.TimeSeriesMetric, - Type: s.Type, + DefaultMetric: s.DefaultMetric, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Metrics: s.Metrics, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, } tmp.Type = "aggregate_metric_double" @@ -785,10 +817,20 @@ func (s AggregateMetricDoubleProperty) 
MarshalJSON() ([]byte, error) { // NewAggregateMetricDoubleProperty returns a AggregateMetricDoubleProperty. func NewAggregateMetricDoubleProperty() *AggregateMetricDoubleProperty { r := &AggregateMetricDoubleProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type AggregateMetricDoublePropertyVariant interface { + AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty +} + +func (s *AggregateMetricDoubleProperty) AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty { + return s +} diff --git a/typedapi/types/aggregateorder.go b/typedapi/types/aggregateorder.go index fec9d5aac4..8ff20a1445 100644 --- a/typedapi/types/aggregateorder.go +++ b/typedapi/types/aggregateorder.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // map[string]sortorder.SortOrder // []map[string]sortorder.SortOrder // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1052-L1054 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1052-L1054 type AggregateOrder any + +type AggregateOrderVariant interface { + AggregateOrderCaster() *AggregateOrder +} diff --git a/typedapi/types/aggregateoutput.go b/typedapi/types/aggregateoutput.go index d7cd647f27..7863a3e33d 100644 --- a/typedapi/types/aggregateoutput.go +++ b/typedapi/types/aggregateoutput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // AggregateOutput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L101-L106 type AggregateOutput struct { Exponent *Weights `json:"exponent,omitempty"` LogisticRegression *Weights `json:"logistic_regression,omitempty"` @@ -36,3 +36,13 @@ func NewAggregateOutput() *AggregateOutput { return r } + +// true + +type AggregateOutputVariant interface { + AggregateOutputCaster() *AggregateOutput +} + +func (s *AggregateOutput) AggregateOutputCaster() *AggregateOutput { + return s +} diff --git a/typedapi/types/aggregationbreakdown.go b/typedapi/types/aggregationbreakdown.go index c642e6b446..bac8d080e1 100644 --- a/typedapi/types/aggregationbreakdown.go +++ b/typedapi/types/aggregationbreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AggregationBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L26-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L26-L39 type AggregationBreakdown struct { BuildAggregation int64 `json:"build_aggregation"` BuildAggregationCount int64 `json:"build_aggregation_count"` @@ -253,3 +253,5 @@ func NewAggregationBreakdown() *AggregationBreakdown { return r } + +// false diff --git a/typedapi/types/aggregationprofile.go b/typedapi/types/aggregationprofile.go index 8a15a1afd0..7dd06668ea 100644 --- a/typedapi/types/aggregationprofile.go +++ b/typedapi/types/aggregationprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AggregationProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L85-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L85-L92 type AggregationProfile struct { Breakdown AggregationBreakdown `json:"breakdown"` Children []AggregationProfile `json:"children,omitempty"` @@ -111,3 +111,5 @@ func NewAggregationProfile() *AggregationProfile { return r } + +// false diff --git a/typedapi/types/aggregationprofiledebug.go b/typedapi/types/aggregationprofiledebug.go index 838a269f26..19104cb514 100644 --- a/typedapi/types/aggregationprofiledebug.go +++ b/typedapi/types/aggregationprofiledebug.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AggregationProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L42-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L42-L76 type AggregationProfileDebug struct { BruteForceUsed *int `json:"brute_force_used,omitempty"` BuiltBuckets *int `json:"built_buckets,omitempty"` @@ -554,3 +554,5 @@ func NewAggregationProfileDebug() *AggregationProfileDebug { return r } + +// false diff --git a/typedapi/types/aggregationprofiledelegatedebugfilter.go b/typedapi/types/aggregationprofiledelegatedebugfilter.go index 5fd9062117..062f878cab 100644 --- a/typedapi/types/aggregationprofiledelegatedebugfilter.go +++ b/typedapi/types/aggregationprofiledelegatedebugfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AggregationProfileDelegateDebugFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L78-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L78-L83 type AggregationProfileDelegateDebugFilter struct { Query *string `json:"query,omitempty"` ResultsFromMetadata *int `json:"results_from_metadata,omitempty"` @@ -121,3 +121,5 @@ func NewAggregationProfileDelegateDebugFilter() *AggregationProfileDelegateDebug return r } + +// false diff --git a/typedapi/types/aggregationrange.go b/typedapi/types/aggregationrange.go index 61ca892376..daabe66b16 100644 --- a/typedapi/types/aggregationrange.go +++ b/typedapi/types/aggregationrange.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AggregationRange type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L691-L704 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L691-L704 type AggregationRange struct { // From Start of the range (inclusive). 
From *Float64 `json:"from,omitempty"` @@ -89,3 +89,13 @@ func NewAggregationRange() *AggregationRange { return r } + +// true + +type AggregationRangeVariant interface { + AggregationRangeCaster() *AggregationRange +} + +func (s *AggregationRange) AggregationRangeCaster() *AggregationRange { + return s +} diff --git a/typedapi/types/aggregations.go b/typedapi/types/aggregations.go index d534f832de..caa3587bbb 100644 --- a/typedapi/types/aggregations.go +++ b/typedapi/types/aggregations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // Aggregations type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/AggregationContainer.ts#L107-L533 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/AggregationContainer.ts#L107-L533 type Aggregations struct { + AdditionalAggregationsProperty map[string]json.RawMessage `json:"-"` // AdjacencyMatrix A bucket aggregation returning a form of adjacency matrix. // The request provides a collection of named filter expressions, similar to the // `filters` aggregation. 
@@ -563,36 +564,36 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { case "linear": o := NewLinearMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "linear", err) } s.MovingAvg = *o case "simple": o := NewSimpleMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple", err) } s.MovingAvg = *o case "ewma": o := NewEwmaMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ewma", err) } s.MovingAvg = *o case "holt": o := NewHoltMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "holt", err) } s.MovingAvg = *o case "holt_winters": o := NewHoltWintersMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "holt_winters", err) } s.MovingAvg = *o default: if err := localDec.Decode(&s.MovingAvg); err != nil { - return err + return fmt.Errorf("MovingAvg | %w", err) } } @@ -756,16 +757,69 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "WeightedAvg", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalAggregationsProperty == nil { + s.AdditionalAggregationsProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalAggregationsProperty", err) + } + s.AdditionalAggregationsProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Aggregations) MarshalJSON() ([]byte, error) { + type opt Aggregations + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, 
&tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalAggregationsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalAggregationsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewAggregations returns a Aggregations. func NewAggregations() *Aggregations { r := &Aggregations{ - Aggregations: make(map[string]Aggregations, 0), + AdditionalAggregationsProperty: make(map[string]json.RawMessage), + Aggregations: make(map[string]Aggregations), } return r } + +// true + +type AggregationsVariant interface { + AggregationsCaster() *Aggregations +} + +func (s *Aggregations) AggregationsCaster() *Aggregations { + return s +} diff --git a/typedapi/types/alias.go b/typedapi/types/alias.go index e409bc62f0..bb0b0058f7 100644 --- a/typedapi/types/alias.go +++ b/typedapi/types/alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Alias type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/Alias.ts#L23-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/Alias.ts#L23-L53 type Alias struct { // Filter Query used to limit documents the alias can access. 
Filter *Query `json:"filter,omitempty"` @@ -124,3 +124,13 @@ func NewAlias() *Alias { return r } + +// true + +type AliasVariant interface { + AliasCaster() *Alias +} + +func (s *Alias) AliasCaster() *Alias { + return s +} diff --git a/typedapi/types/aliasdefinition.go b/typedapi/types/aliasdefinition.go index 088f782069..958462584f 100644 --- a/typedapi/types/aliasdefinition.go +++ b/typedapi/types/aliasdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AliasDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/AliasDefinition.ts#L22-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/AliasDefinition.ts#L22-L54 type AliasDefinition struct { // Filter Query used to limit documents the alias can access. Filter *Query `json:"filter,omitempty"` @@ -145,3 +145,13 @@ func NewAliasDefinition() *AliasDefinition { return r } + +// true + +type AliasDefinitionVariant interface { + AliasDefinitionCaster() *AliasDefinition +} + +func (s *AliasDefinition) AliasDefinitionCaster() *AliasDefinition { + return s +} diff --git a/typedapi/types/aliasesrecord.go b/typedapi/types/aliasesrecord.go index 3664d0ff60..22d5a6548e 100644 --- a/typedapi/types/aliasesrecord.go +++ b/typedapi/types/aliasesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AliasesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/aliases/types.ts#L22-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/aliases/types.ts#L22-L53 type AliasesRecord struct { // Alias alias name Alias *string `json:"alias,omitempty"` @@ -138,3 +138,5 @@ func NewAliasesRecord() *AliasesRecord { return r } + +// false diff --git a/typedapi/types/allfield.go b/typedapi/types/allfield.go index 27c1e16e8c..7044aec2f7 100644 --- a/typedapi/types/allfield.go +++ b/typedapi/types/allfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AllField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L29-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L29-L40 type AllField struct { Analyzer string `json:"analyzer"` Enabled bool `json:"enabled"` @@ -205,3 +205,13 @@ func NewAllField() *AllField { return r } + +// true + +type AllFieldVariant interface { + AllFieldCaster() *AllField +} + +func (s *AllField) AllFieldCaster() *AllField { + return s +} diff --git a/typedapi/types/allocateaction.go b/typedapi/types/allocateaction.go index 77458db8cf..a8e38757c6 100644 --- a/typedapi/types/allocateaction.go +++ b/typedapi/types/allocateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AllocateAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L136-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L133-L139 type AllocateAction struct { Exclude map[string]string `json:"exclude,omitempty"` Include map[string]string `json:"include,omitempty"` @@ -119,10 +119,20 @@ func (s *AllocateAction) UnmarshalJSON(data []byte) error { // NewAllocateAction returns a AllocateAction. 
func NewAllocateAction() *AllocateAction { r := &AllocateAction{ - Exclude: make(map[string]string, 0), - Include: make(map[string]string, 0), - Require: make(map[string]string, 0), + Exclude: make(map[string]string), + Include: make(map[string]string), + Require: make(map[string]string), } return r } + +// true + +type AllocateActionVariant interface { + AllocateActionCaster() *AllocateAction +} + +func (s *AllocateAction) AllocateActionCaster() *AllocateAction { + return s +} diff --git a/typedapi/types/allocationdecision.go b/typedapi/types/allocationdecision.go index d8ad1b4231..ecd8afbd63 100644 --- a/typedapi/types/allocationdecision.go +++ b/typedapi/types/allocationdecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // AllocationDecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L27-L31 type AllocationDecision struct { Decider string `json:"decider"` Decision allocationexplaindecision.AllocationExplainDecision `json:"decision"` @@ -95,3 +95,5 @@ func NewAllocationDecision() *AllocationDecision { return r } + +// false diff --git a/typedapi/types/allocationrecord.go b/typedapi/types/allocationrecord.go index 6aaa8c7234..8e44c5c060 100644 --- a/typedapi/types/allocationrecord.go +++ b/typedapi/types/allocationrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,25 +31,25 @@ import ( // AllocationRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/allocation/types.ts#L25-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/allocation/types.ts#L25-L99 type AllocationRecord struct { // DiskAvail Free disk space available to Elasticsearch. // Elasticsearch retrieves this metric from the node’s operating system. // Disk-based shard allocation uses this metric to assign shards to nodes based // on available disk space. - DiskAvail *ByteSize `json:"disk.avail,omitempty"` + DiskAvail ByteSize `json:"disk.avail,omitempty"` // DiskIndices Disk space used by the node’s shards. Does not include disk space for the // translog or unassigned shards. // IMPORTANT: This metric double-counts disk space for hard-linked files, such // as those created when shrinking, splitting, or cloning an index. - DiskIndices *ByteSize `json:"disk.indices,omitempty"` + DiskIndices ByteSize `json:"disk.indices,omitempty"` // DiskIndicesForecast Sum of shard size forecasts - DiskIndicesForecast *ByteSize `json:"disk.indices.forecast,omitempty"` + DiskIndicesForecast ByteSize `json:"disk.indices.forecast,omitempty"` // DiskPercent Total percentage of disk space in use. Calculated as `disk.used / // disk.total`. - DiskPercent *Percentage `json:"disk.percent,omitempty"` + DiskPercent Percentage `json:"disk.percent,omitempty"` // DiskTotal Total disk space for the node, including in-use and available space. - DiskTotal *ByteSize `json:"disk.total,omitempty"` + DiskTotal ByteSize `json:"disk.total,omitempty"` // DiskUsed Total disk space in use. 
// Elasticsearch retrieves this metric from the node’s operating system (OS). // The metric includes disk space for: Elasticsearch, including the translog and @@ -57,7 +57,7 @@ type AllocationRecord struct { // files on the node. // Unlike `disk.indices`, this metric does not double-count disk space for // hard-linked files. - DiskUsed *ByteSize `json:"disk.used,omitempty"` + DiskUsed ByteSize `json:"disk.used,omitempty"` // Host Network host for the node. Set using the `network.host` setting. Host *string `json:"host,omitempty"` // Ip IP address and port for the node. @@ -72,7 +72,7 @@ type AllocationRecord struct { // -1 other than desired balance allocator is used ShardsUndesired *string `json:"shards.undesired,omitempty"` // WriteLoadForecast Sum of index write load forecasts - WriteLoadForecast *Stringifieddouble `json:"write_load.forecast,omitempty"` + WriteLoadForecast Stringifieddouble `json:"write_load.forecast,omitempty"` } func (s *AllocationRecord) UnmarshalJSON(data []byte) error { @@ -194,3 +194,5 @@ func NewAllocationRecord() *AllocationRecord { return r } + +// false diff --git a/typedapi/types/allocationstore.go b/typedapi/types/allocationstore.go index aaeaa0d9e3..2b459e18e5 100644 --- a/typedapi/types/allocationstore.go +++ b/typedapi/types/allocationstore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AllocationStore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L40-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L40-L47 type AllocationStore struct { AllocationId string `json:"allocation_id"` Found bool `json:"found"` @@ -148,3 +148,5 @@ func NewAllocationStore() *AllocationStore { return r } + +// false diff --git a/typedapi/types/alwayscondition.go b/typedapi/types/alwayscondition.go index 77672cfc39..6e34c9a498 100644 --- a/typedapi/types/alwayscondition.go +++ b/typedapi/types/alwayscondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // AlwaysCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L25-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L25-L25 type AlwaysCondition struct { } @@ -32,3 +32,13 @@ func NewAlwaysCondition() *AlwaysCondition { return r } + +// true + +type AlwaysConditionVariant interface { + AlwaysConditionCaster() *AlwaysCondition +} + +func (s *AlwaysCondition) AlwaysConditionCaster() *AlwaysCondition { + return s +} diff --git a/typedapi/types/analysisconfig.go b/typedapi/types/analysisconfig.go index 8bb7ffa249..0f5ce773ed 100644 --- a/typedapi/types/analysisconfig.go +++ b/typedapi/types/analysisconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalysisConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L29-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L29-L77 type AnalysisConfig struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically // between `5m` and `1h`. This value should be either a whole number of days or @@ -234,3 +234,13 @@ func NewAnalysisConfig() *AnalysisConfig { return r } + +// true + +type AnalysisConfigVariant interface { + AnalysisConfigCaster() *AnalysisConfig +} + +func (s *AnalysisConfig) AnalysisConfigCaster() *AnalysisConfig { + return s +} diff --git a/typedapi/types/analysisconfigread.go b/typedapi/types/analysisconfigread.go index 7d1f82f093..795c132411 100644 --- a/typedapi/types/analysisconfigread.go +++ b/typedapi/types/analysisconfigread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalysisConfigRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L79-L148 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L79-L148 type AnalysisConfigRead struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically // between `5m` and `1h`. @@ -221,3 +221,5 @@ func NewAnalysisConfigRead() *AnalysisConfigRead { return r } + +// false diff --git a/typedapi/types/analysislimits.go b/typedapi/types/analysislimits.go index f40455a3a0..5514dd4cad 100644 --- a/typedapi/types/analysislimits.go +++ b/typedapi/types/analysislimits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalysisLimits type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L161-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L161-L172 type AnalysisLimits struct { // CategorizationExamplesLimit The maximum number of examples stored per category in memory and in the // results data store. If you increase this value, more examples are available, @@ -55,7 +55,7 @@ type AnalysisLimits struct { // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to // create jobs that have `model_memory_limit` values greater than that setting // value. 
- ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + ModelMemoryLimit ByteSize `json:"model_memory_limit,omitempty"` } func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { @@ -89,16 +89,9 @@ func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { } case "model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.ModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.ModelMemoryLimit = &o } } @@ -111,3 +104,13 @@ func NewAnalysisLimits() *AnalysisLimits { return r } + +// true + +type AnalysisLimitsVariant interface { + AnalysisLimitsCaster() *AnalysisLimits +} + +func (s *AnalysisLimits) AnalysisLimitsCaster() *AnalysisLimits { + return s +} diff --git a/typedapi/types/analysismemorylimit.go b/typedapi/types/analysismemorylimit.go index b11d717296..8ba09cb379 100644 --- a/typedapi/types/analysismemorylimit.go +++ b/typedapi/types/analysismemorylimit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalysisMemoryLimit type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L174-L179 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L174-L179 type AnalysisMemoryLimit struct { // ModelMemoryLimit Limits can be applied for the resources required to hold the mathematical // models in memory. These limits are approximate and can be set per job. 
They @@ -78,3 +78,13 @@ func NewAnalysisMemoryLimit() *AnalysisMemoryLimit { return r } + +// true + +type AnalysisMemoryLimitVariant interface { + AnalysisMemoryLimitCaster() *AnalysisMemoryLimit +} + +func (s *AnalysisMemoryLimit) AnalysisMemoryLimitCaster() *AnalysisMemoryLimit { + return s +} diff --git a/typedapi/types/analytics.go b/typedapi/types/analytics.go index 605437837a..1a0289c004 100644 --- a/typedapi/types/analytics.go +++ b/typedapi/types/analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Analytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L328-L330 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L340-L342 type Analytics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewAnalytics() *Analytics { return r } + +// false diff --git a/typedapi/types/analyticscollection.go b/typedapi/types/analyticscollection.go index d898591f30..87c8452e58 100644 --- a/typedapi/types/analyticscollection.go +++ b/typedapi/types/analyticscollection.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // AnalyticsCollection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 type AnalyticsCollection struct { // EventDataStream Data stream for the collection. EventDataStream EventDataStream `json:"event_data_stream"` @@ -34,3 +34,5 @@ func NewAnalyticsCollection() *AnalyticsCollection { return r } + +// false diff --git a/typedapi/types/analyticsstatistics.go b/typedapi/types/analyticsstatistics.go index dda7dd8702..263777725e 100644 --- a/typedapi/types/analyticsstatistics.go +++ b/typedapi/types/analyticsstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalyticsStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L59-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L58-L68 type AnalyticsStatistics struct { BoxplotUsage int64 `json:"boxplot_usage"` CumulativeCardinalityUsage int64 `json:"cumulative_cardinality_usage"` @@ -205,3 +205,5 @@ func NewAnalyticsStatistics() *AnalyticsStatistics { return r } + +// false diff --git a/typedapi/types/analyzedetail.go b/typedapi/types/analyzedetail.go index 4209d560cc..81e47eef9e 100644 --- a/typedapi/types/analyzedetail.go +++ b/typedapi/types/analyzedetail.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalyzeDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L24-L30 type AnalyzeDetail struct { Analyzer *AnalyzerDetail `json:"analyzer,omitempty"` Charfilters []CharFilterDetail `json:"charfilters,omitempty"` @@ -100,3 +100,5 @@ func NewAnalyzeDetail() *AnalyzeDetail { return r } + +// false diff --git a/typedapi/types/analyzer.go b/typedapi/types/analyzer.go index 545fdc976c..41d9820584 100644 --- a/typedapi/types/analyzer.go +++ b/typedapi/types/analyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,7 +25,6 @@ package types // CustomAnalyzer // FingerprintAnalyzer // KeywordAnalyzer -// LanguageAnalyzer // NoriAnalyzer // PatternAnalyzer // SimpleAnalyzer @@ -73,5 +72,9 @@ package types // TurkishAnalyzer // ThaiAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L359-L413 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L427-L480 type Analyzer any + +type AnalyzerVariant interface { + AnalyzerCaster() *Analyzer +} diff --git a/typedapi/types/analyzerdetail.go b/typedapi/types/analyzerdetail.go index 071aa2cac7..1069a6ae62 100644 --- a/typedapi/types/analyzerdetail.go +++ b/typedapi/types/analyzerdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalyzerDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L32-L35 type AnalyzerDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` @@ -80,3 +80,5 @@ func NewAnalyzerDetail() *AnalyzerDetail { return r } + +// false diff --git a/typedapi/types/analyzetoken.go b/typedapi/types/analyzetoken.go index 2cdea590b6..082f5d2041 100644 --- a/typedapi/types/analyzetoken.go +++ b/typedapi/types/analyzetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnalyzeToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L37-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L37-L44 type AnalyzeToken struct { EndOffset int64 `json:"end_offset"` Position int64 `json:"position"` @@ -151,3 +151,5 @@ func NewAnalyzeToken() *AnalyzeToken { return r } + +// false diff --git a/typedapi/types/anomaly.go b/typedapi/types/anomaly.go index 9d1e692462..48a4a24e0f 100644 --- a/typedapi/types/anomaly.go +++ b/typedapi/types/anomaly.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Anomaly type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Anomaly.ts#L24-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Anomaly.ts#L24-L121 type Anomaly struct { // Actual The actual value for the bucket. Actual []Float64 `json:"actual,omitempty"` @@ -388,3 +388,5 @@ func NewAnomaly() *Anomaly { return r } + +// false diff --git a/typedapi/types/anomalycause.go b/typedapi/types/anomalycause.go index 5b621a2053..a6c3d934a4 100644 --- a/typedapi/types/anomalycause.go +++ b/typedapi/types/anomalycause.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,22 +31,23 @@ import ( // AnomalyCause type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Anomaly.ts#L123-L138 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Anomaly.ts#L123-L139 type AnomalyCause struct { - Actual []Float64 `json:"actual"` - ByFieldName string `json:"by_field_name"` - ByFieldValue string `json:"by_field_value"` - CorrelatedByFieldValue string `json:"correlated_by_field_value"` - FieldName string `json:"field_name"` - Function string `json:"function"` - FunctionDescription string `json:"function_description"` - Influencers []Influence `json:"influencers"` - OverFieldName string `json:"over_field_name"` - OverFieldValue string `json:"over_field_value"` - PartitionFieldName string `json:"partition_field_name"` - PartitionFieldValue string `json:"partition_field_value"` + Actual []Float64 `json:"actual,omitempty"` + ByFieldName *string `json:"by_field_name,omitempty"` + ByFieldValue *string `json:"by_field_value,omitempty"` + CorrelatedByFieldValue *string `json:"correlated_by_field_value,omitempty"` + FieldName *string `json:"field_name,omitempty"` + Function *string `json:"function,omitempty"` + FunctionDescription *string `json:"function_description,omitempty"` + GeoResults *GeoResults `json:"geo_results,omitempty"` + Influencers []Influence `json:"influencers,omitempty"` + OverFieldName *string `json:"over_field_name,omitempty"` + OverFieldValue *string `json:"over_field_value,omitempty"` + PartitionFieldName *string `json:"partition_field_name,omitempty"` + PartitionFieldValue *string `json:"partition_field_value,omitempty"` Probability Float64 `json:"probability"` - Typical []Float64 `json:"typical"` + Typical []Float64 `json:"typical,omitempty"` } func (s *AnomalyCause) UnmarshalJSON(data []byte) error { @@ -84,7 +85,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - 
s.ByFieldValue = o + s.ByFieldValue = &o case "correlated_by_field_value": var tmp json.RawMessage @@ -96,7 +97,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.CorrelatedByFieldValue = o + s.CorrelatedByFieldValue = &o case "field_name": if err := dec.Decode(&s.FieldName); err != nil { @@ -113,7 +114,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Function = o + s.Function = &o case "function_description": var tmp json.RawMessage @@ -125,7 +126,12 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.FunctionDescription = o + s.FunctionDescription = &o + + case "geo_results": + if err := dec.Decode(&s.GeoResults); err != nil { + return fmt.Errorf("%s | %w", "GeoResults", err) + } case "influencers": if err := dec.Decode(&s.Influencers); err != nil { @@ -147,7 +153,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.OverFieldValue = o + s.OverFieldValue = &o case "partition_field_name": var tmp json.RawMessage @@ -159,7 +165,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.PartitionFieldName = o + s.PartitionFieldName = &o case "partition_field_value": var tmp json.RawMessage @@ -171,7 +177,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.PartitionFieldValue = o + s.PartitionFieldValue = &o case "probability": var tmp any @@ -205,3 +211,5 @@ func NewAnomalyCause() *AnomalyCause { return r } + +// false diff --git a/typedapi/types/anomalydetectors.go b/typedapi/types/anomalydetectors.go index 9b6072eaf1..eb5035c4fe 100644 --- a/typedapi/types/anomalydetectors.go +++ b/typedapi/types/anomalydetectors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnomalyDetectors type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/types.ts#L44-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/types.ts#L46-L52 type AnomalyDetectors struct { CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer"` CategorizationExamplesLimit int `json:"categorization_examples_limit"` @@ -162,3 +162,5 @@ func NewAnomalyDetectors() *AnomalyDetectors { return r } + +// false diff --git a/typedapi/types/anomalyexplanation.go b/typedapi/types/anomalyexplanation.go index 45457d1b34..11e6d05ab8 100644 --- a/typedapi/types/anomalyexplanation.go +++ b/typedapi/types/anomalyexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AnomalyExplanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Anomaly.ts#L156-L197 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Anomaly.ts#L157-L198 type AnomalyExplanation struct { // AnomalyCharacteristicsImpact Impact from the duration and magnitude of the detected anomaly relative to // the historical average. 
@@ -237,3 +237,5 @@ func NewAnomalyExplanation() *AnomalyExplanation { return r } + +// false diff --git a/typedapi/types/apikey.go b/typedapi/types/apikey.go index cde35ad550..3f39bceda4 100644 --- a/typedapi/types/apikey.go +++ b/typedapi/types/apikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,14 +27,23 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeytype" ) // ApiKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/ApiKey.ts#L26-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/ApiKey.ts#L27-L113 type ApiKey struct { + // Access The access granted to cross-cluster API keys. + // The access is composed of permissions for cross cluster search and cross + // cluster replication. + // At least one of them must be specified. + // When specified, the new access assignment fully replaces the previously + // assigned access. + Access *Access `json:"access,omitempty"` // Creation Creation time for the API key in milliseconds. - Creation *int64 `json:"creation,omitempty"` + Creation int64 `json:"creation"` // Expiration Expiration time for the API key in milliseconds. Expiration *int64 `json:"expiration,omitempty"` // Id Id for the API key @@ -42,21 +51,23 @@ type ApiKey struct { // Invalidated Invalidation status for the API key. // If the key has been invalidated, it has a value of `true`. Otherwise, it is // `false`. 
- Invalidated *bool `json:"invalidated,omitempty"` + Invalidated bool `json:"invalidated"` + // Invalidation If the key has been invalidated, invalidation time in milliseconds. + Invalidation *int64 `json:"invalidation,omitempty"` // LimitedBy The owner user’s permissions associated with the API key. // It is a point-in-time snapshot captured at creation and subsequent updates. // An API key’s effective permissions are an intersection of its assigned // privileges and the owner user’s permissions. LimitedBy []map[string]RoleDescriptor `json:"limited_by,omitempty"` // Metadata Metadata of the API key - Metadata Metadata `json:"metadata,omitempty"` + Metadata Metadata `json:"metadata"` // Name Name of the API key. Name string `json:"name"` // ProfileUid The profile uid for the API key owner principal, if requested and if it // exists ProfileUid *string `json:"profile_uid,omitempty"` // Realm Realm name of the principal for which this API key was created. - Realm *string `json:"realm,omitempty"` + Realm string `json:"realm"` // RealmType Realm type of the principal for which this API key was created RealmType *string `json:"realm_type,omitempty"` // RoleDescriptors The role descriptors assigned to this API key when it was created or last @@ -64,9 +75,13 @@ type ApiKey struct { // An empty role descriptor means the API key inherits the owner user’s // permissions. RoleDescriptors map[string]RoleDescriptor `json:"role_descriptors,omitempty"` - Sort_ []FieldValue `json:"_sort,omitempty"` + // Sort_ Sorting values when using the `sort` parameter with the + // `security.query_api_keys` API. + Sort_ []FieldValue `json:"_sort,omitempty"` + // Type The type of the API key (e.g. `rest` or `cross_cluster`). 
+ Type apikeytype.ApiKeyType `json:"type"` // Username Principal for which this API key was created - Username *string `json:"username,omitempty"` + Username string `json:"username"` } func (s *ApiKey) UnmarshalJSON(data []byte) error { @@ -84,34 +99,19 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { switch t { + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + case "creation": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Creation", err) - } - s.Creation = &value - case float64: - f := int64(v) - s.Creation = &f + if err := dec.Decode(&s.Creation); err != nil { + return fmt.Errorf("%s | %w", "Creation", err) } case "expiration": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Expiration", err) - } - s.Expiration = &value - case float64: - f := int64(v) - s.Expiration = &f + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) } case "id": @@ -128,9 +128,14 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "Invalidated", err) } - s.Invalidated = &value + s.Invalidated = value case bool: - s.Invalidated = &v + s.Invalidated = v + } + + case "invalidation": + if err := dec.Decode(&s.Invalidation); err != nil { + return fmt.Errorf("%s | %w", "Invalidation", err) } case "limited_by": @@ -170,7 +175,7 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Realm = &o + s.Realm = o case "realm_type": var tmp json.RawMessage @@ -197,6 +202,11 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Sort_", err) } + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | 
%w", "Type", err) + } + case "username": if err := dec.Decode(&s.Username); err != nil { return fmt.Errorf("%s | %w", "Username", err) @@ -210,8 +220,10 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { // NewApiKey returns a ApiKey. func NewApiKey() *ApiKey { r := &ApiKey{ - RoleDescriptors: make(map[string]RoleDescriptor, 0), + RoleDescriptors: make(map[string]RoleDescriptor), } return r } + +// false diff --git a/typedapi/types/apikeyaggregate.go b/typedapi/types/apikeyaggregate.go index 2b165682ef..2a971da200 100644 --- a/typedapi/types/apikeyaggregate.go +++ b/typedapi/types/apikeyaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,5 +36,5 @@ package types // DateRangeAggregate // CompositeAggregate // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/types.ts#L122-L139 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/types.ts#L122-L139 type ApiKeyAggregate any diff --git a/typedapi/types/apikeyaggregationcontainer.go b/typedapi/types/apikeyaggregationcontainer.go index 9812cd3cb3..954b438bcd 100644 --- a/typedapi/types/apikeyaggregationcontainer.go +++ b/typedapi/types/apikeyaggregationcontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // ApiKeyAggregationContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/types.ts#L63-L120 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/types.ts#L63-L120 type ApiKeyAggregationContainer struct { + AdditionalApiKeyAggregationContainerProperty map[string]json.RawMessage `json:"-"` // Aggregations Sub-aggregations for this aggregation. // Only applies to bucket aggregations. Aggregations map[string]ApiKeyAggregationContainer `json:"aggregations,omitempty"` @@ -139,16 +140,69 @@ func (s *ApiKeyAggregationContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ValueCount", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalApiKeyAggregationContainerProperty == nil { + s.AdditionalApiKeyAggregationContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalApiKeyAggregationContainerProperty", err) + } + s.AdditionalApiKeyAggregationContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ApiKeyAggregationContainer) MarshalJSON() ([]byte, error) { + type opt ApiKeyAggregationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from 
the underlying map + for key, value := range s.AdditionalApiKeyAggregationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalApiKeyAggregationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewApiKeyAggregationContainer returns a ApiKeyAggregationContainer. func NewApiKeyAggregationContainer() *ApiKeyAggregationContainer { r := &ApiKeyAggregationContainer{ - Aggregations: make(map[string]ApiKeyAggregationContainer, 0), + AdditionalApiKeyAggregationContainerProperty: make(map[string]json.RawMessage), + Aggregations: make(map[string]ApiKeyAggregationContainer), } return r } + +// true + +type ApiKeyAggregationContainerVariant interface { + ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer +} + +func (s *ApiKeyAggregationContainer) ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer { + return s +} diff --git a/typedapi/types/apikeyauthorization.go b/typedapi/types/apikeyauthorization.go index 9ce52126cb..7827a0623e 100644 --- a/typedapi/types/apikeyauthorization.go +++ b/typedapi/types/apikeyauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ApiKeyAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Authorization.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Authorization.ts#L20-L29 type ApiKeyAuthorization struct { // Id The identifier for the API key. 
Id string `json:"id"` @@ -89,3 +89,5 @@ func NewApiKeyAuthorization() *ApiKeyAuthorization { return r } + +// false diff --git a/typedapi/types/apikeyfiltersaggregation.go b/typedapi/types/apikeyfiltersaggregation.go index 355b977337..cde223b400 100644 --- a/typedapi/types/apikeyfiltersaggregation.go +++ b/typedapi/types/apikeyfiltersaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ApiKeyFiltersAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/types.ts#L207-L227 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/types.ts#L207-L227 type ApiKeyFiltersAggregation struct { // Filters Collection of queries from which to build buckets. Filters BucketsApiKeyQueryContainer `json:"filters,omitempty"` @@ -132,3 +132,13 @@ func NewApiKeyFiltersAggregation() *ApiKeyFiltersAggregation { return r } + +// true + +type ApiKeyFiltersAggregationVariant interface { + ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation +} + +func (s *ApiKeyFiltersAggregation) ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation { + return s +} diff --git a/typedapi/types/apikeyquerycontainer.go b/typedapi/types/apikeyquerycontainer.go index 893ffe9bc6..d86712af53 100644 --- a/typedapi/types/apikeyquerycontainer.go +++ b/typedapi/types/apikeyquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,9 +30,10 @@ import ( // ApiKeyQueryContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_api_keys/types.ts#L141-L205 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_api_keys/types.ts#L141-L205 type ApiKeyQueryContainer struct { - // Bool matches documents matching boolean combinations of other queries. + AdditionalApiKeyQueryContainerProperty map[string]json.RawMessage `json:"-"` + // Bool Matches documents matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns documents that contain an indexed value for a field. Exists *ExistsQuery `json:"exists,omitempty"` @@ -159,20 +160,73 @@ func (s *ApiKeyQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalApiKeyQueryContainerProperty == nil { + s.AdditionalApiKeyQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalApiKeyQueryContainerProperty", err) + } + s.AdditionalApiKeyQueryContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ApiKeyQueryContainer) MarshalJSON() ([]byte, error) { + type opt ApiKeyQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != 
nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalApiKeyQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalApiKeyQueryContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewApiKeyQueryContainer returns a ApiKeyQueryContainer. func NewApiKeyQueryContainer() *ApiKeyQueryContainer { r := &ApiKeyQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalApiKeyQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type ApiKeyQueryContainerVariant interface { + ApiKeyQueryContainerCaster() *ApiKeyQueryContainer +} + +func (s *ApiKeyQueryContainer) ApiKeyQueryContainerCaster() *ApiKeyQueryContainer { + return s +} diff --git a/typedapi/types/appendprocessor.go b/typedapi/types/appendprocessor.go index 389b1f8e3d..034ae7fb57 100644 --- a/typedapi/types/appendprocessor.go +++ b/typedapi/types/appendprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AppendProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L324-L339 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L329-L344 type AppendProcessor struct { // AllowDuplicates If `false`, the processor does not append values already present in the // field. @@ -43,7 +43,7 @@ type AppendProcessor struct { // Supports template snippets. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -102,16 +102,9 @@ func (s *AppendProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -171,3 +164,13 @@ func NewAppendProcessor() *AppendProcessor { return r } + +// true + +type AppendProcessorVariant interface { + AppendProcessorCaster() *AppendProcessor +} + +func (s *AppendProcessor) AppendProcessorCaster() *AppendProcessor { + return s +} diff --git a/typedapi/types/applicationglobaluserprivileges.go b/typedapi/types/applicationglobaluserprivileges.go index 0c69fae082..d4e1d63a01 100644 --- a/typedapi/types/applicationglobaluserprivileges.go +++ b/typedapi/types/applicationglobaluserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ApplicationGlobalUserPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L372-L374 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L434-L436 type ApplicationGlobalUserPrivileges struct { Manage ManageUserPrivileges `json:"manage"` } @@ -33,3 +33,13 @@ func NewApplicationGlobalUserPrivileges() *ApplicationGlobalUserPrivileges { return r } + +// true + +type ApplicationGlobalUserPrivilegesVariant interface { + ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges +} + +func (s *ApplicationGlobalUserPrivileges) ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges { + return s +} diff --git a/typedapi/types/applicationprivileges.go b/typedapi/types/applicationprivileges.go index db1191567b..488c9c034e 100644 --- a/typedapi/types/applicationprivileges.go +++ b/typedapi/types/applicationprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ApplicationPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L27-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L27-L40 type ApplicationPrivileges struct { // Application The name of the application to which this entry applies. Application string `json:"application"` @@ -90,3 +90,13 @@ func NewApplicationPrivileges() *ApplicationPrivileges { return r } + +// true + +type ApplicationPrivilegesVariant interface { + ApplicationPrivilegesCaster() *ApplicationPrivileges +} + +func (s *ApplicationPrivileges) ApplicationPrivilegesCaster() *ApplicationPrivileges { + return s +} diff --git a/typedapi/types/applicationprivilegescheck.go b/typedapi/types/applicationprivilegescheck.go index e04c4e32fd..5a9a95aeba 100644 --- a/typedapi/types/applicationprivilegescheck.go +++ b/typedapi/types/applicationprivilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,15 @@ import ( // ApplicationPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/types.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/types.ts#L24-L32 type ApplicationPrivilegesCheck struct { // Application The name of the application. Application string `json:"application"` // Privileges A list of the privileges that you want to check for the specified resources. 
- // May be either application privilege names, or the names of actions that are + // It may be either application privilege names or the names of actions that are // granted by those privileges Privileges []string `json:"privileges"` - // Resources A list of resource names against which the privileges should be checked + // Resources A list of resource names against which the privileges should be checked. Resources []string `json:"resources"` } @@ -91,3 +91,13 @@ func NewApplicationPrivilegesCheck() *ApplicationPrivilegesCheck { return r } + +// true + +type ApplicationPrivilegesCheckVariant interface { + ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck +} + +func (s *ApplicationPrivilegesCheck) ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck { + return s +} diff --git a/typedapi/types/applicationsprivileges.go b/typedapi/types/applicationsprivileges.go index 7df1ebcf5d..680034e862 100644 --- a/typedapi/types/applicationsprivileges.go +++ b/typedapi/types/applicationsprivileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ApplicationsPrivileges type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/types.ts#L46-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/types.ts#L47-L47 type ApplicationsPrivileges map[string]ResourcePrivileges diff --git a/typedapi/types/arabicanalyzer.go b/typedapi/types/arabicanalyzer.go index 0e9dc5a1a0..f6d0bb5b05 100644 --- a/typedapi/types/arabicanalyzer.go +++ b/typedapi/types/arabicanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ArabicAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L61-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L72-L77 type ArabicAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewArabicAnalyzer() *ArabicAnalyzer { return r } + +// true + +type ArabicAnalyzerVariant interface { + ArabicAnalyzerCaster() *ArabicAnalyzer +} + +func (s *ArabicAnalyzer) ArabicAnalyzerCaster() *ArabicAnalyzer { + return s +} diff --git a/typedapi/types/archive.go b/typedapi/types/archive.go index 5bf4ae0d9d..d0532684f6 100644 --- a/typedapi/types/archive.go +++ b/typedapi/types/archive.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Archive type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L45-L47 type Archive struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -107,3 +107,5 @@ func NewArchive() *Archive { return r } + +// false diff --git a/typedapi/types/armeniananalyzer.go b/typedapi/types/armeniananalyzer.go index 3821fd2308..16c9a27270 100644 --- a/typedapi/types/armeniananalyzer.go +++ b/typedapi/types/armeniananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ArmenianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L68-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L79-L84 type ArmenianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewArmenianAnalyzer() *ArmenianAnalyzer { return r } + +// true + +type ArmenianAnalyzerVariant interface { + ArmenianAnalyzerCaster() *ArmenianAnalyzer +} + +func (s *ArmenianAnalyzer) ArmenianAnalyzerCaster() *ArmenianAnalyzer { + return s +} diff --git a/typedapi/types/arraycomparecondition.go b/typedapi/types/arraycomparecondition.go index 4c5691eb8e..64b9ed05e6 100644 --- a/typedapi/types/arraycomparecondition.go +++ b/typedapi/types/arraycomparecondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ArrayCompareCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L32-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L32-L39 type ArrayCompareCondition struct { ArrayCompareCondition map[conditionop.ConditionOp]ArrayCompareOpParams `json:"-"` Path string `json:"path"` @@ -121,8 +121,18 @@ func (s ArrayCompareCondition) MarshalJSON() ([]byte, error) { // NewArrayCompareCondition returns a ArrayCompareCondition. 
func NewArrayCompareCondition() *ArrayCompareCondition { r := &ArrayCompareCondition{ - ArrayCompareCondition: make(map[conditionop.ConditionOp]ArrayCompareOpParams, 0), + ArrayCompareCondition: make(map[conditionop.ConditionOp]ArrayCompareOpParams), } return r } + +// true + +type ArrayCompareConditionVariant interface { + ArrayCompareConditionCaster() *ArrayCompareCondition +} + +func (s *ArrayCompareCondition) ArrayCompareConditionCaster() *ArrayCompareCondition { + return s +} diff --git a/typedapi/types/arraycompareopparams.go b/typedapi/types/arraycompareopparams.go index c2ca5f0708..c46a61e300 100644 --- a/typedapi/types/arraycompareopparams.go +++ b/typedapi/types/arraycompareopparams.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ArrayCompareOpParams type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L27-L30 type ArrayCompareOpParams struct { Quantifier quantifier.Quantifier `json:"quantifier"` Value FieldValue `json:"value"` @@ -74,3 +74,13 @@ func NewArrayCompareOpParams() *ArrayCompareOpParams { return r } + +// true + +type ArrayCompareOpParamsVariant interface { + ArrayCompareOpParamsCaster() *ArrayCompareOpParams +} + +func (s *ArrayCompareOpParams) ArrayCompareOpParamsCaster() *ArrayCompareOpParams { + return s +} diff --git a/typedapi/types/arraypercentilesitem.go b/typedapi/types/arraypercentilesitem.go index 02901a5b20..a56d78c493 100644 --- a/typedapi/types/arraypercentilesitem.go +++ b/typedapi/types/arraypercentilesitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ArrayPercentilesItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L162-L166 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L162-L166 type ArrayPercentilesItem struct { Key string `json:"key"` Value *Float64 `json:"value,omitempty"` @@ -93,3 +93,5 @@ func NewArrayPercentilesItem() *ArrayPercentilesItem { return r } + +// false diff --git a/typedapi/types/asciifoldingtokenfilter.go b/typedapi/types/asciifoldingtokenfilter.go index 118934edb8..8a24dbcc84 100644 --- a/typedapi/types/asciifoldingtokenfilter.go +++ b/typedapi/types/asciifoldingtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AsciiFoldingTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L169-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L169-L172 type AsciiFoldingTokenFilter struct { PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewAsciiFoldingTokenFilter() *AsciiFoldingTokenFilter { return r } + +// true + +type AsciiFoldingTokenFilterVariant interface { + AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter +} + +func (s *AsciiFoldingTokenFilter) AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter { + return s +} diff --git a/typedapi/types/asyncsearch.go b/typedapi/types/asyncsearch.go index 01d245c6de..f29fef5a75 100644 --- a/typedapi/types/asyncsearch.go +++ b/typedapi/types/asyncsearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,10 +32,10 @@ import ( // AsyncSearch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/async_search/_types/AsyncSearch.ts#L30-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/async_search/_types/AsyncSearch.ts#L30-L56 type AsyncSearch struct { // Aggregations Partial aggregations results, coming from the shards that have already - // completed the execution of the query. + // completed running the query. 
Aggregations map[string]Aggregate `json:"aggregations,omitempty"` Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` Fields map[string]json.RawMessage `json:"fields,omitempty"` @@ -789,10 +789,12 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { // NewAsyncSearch returns a AsyncSearch. func NewAsyncSearch() *AsyncSearch { r := &AsyncSearch{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), + Aggregations: make(map[string]Aggregate), + Fields: make(map[string]json.RawMessage), + Suggest: make(map[string][]Suggest), } return r } + +// false diff --git a/typedapi/types/attachmentprocessor.go b/typedapi/types/attachmentprocessor.go index abbb222763..806c5dfdd1 100644 --- a/typedapi/types/attachmentprocessor.go +++ b/typedapi/types/attachmentprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AttachmentProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L341-L382 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L346-L387 type AttachmentProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type AttachmentProcessor struct { // Field The field to get the base64 encoded field from. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and field does not exist, the processor quietly exits without @@ -103,16 +103,9 @@ func (s *AttachmentProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -226,3 +219,13 @@ func NewAttachmentProcessor() *AttachmentProcessor { return r } + +// true + +type AttachmentProcessorVariant interface { + AttachmentProcessorCaster() *AttachmentProcessor +} + +func (s *AttachmentProcessor) AttachmentProcessorCaster() *AttachmentProcessor { + return s +} diff --git a/typedapi/types/audit.go b/typedapi/types/audit.go index b26db96cd8..78a997d40f 100644 --- a/typedapi/types/audit.go +++ b/typedapi/types/audit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Audit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L71-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L70-L72 type Audit struct { Enabled bool `json:"enabled"` Outputs []string `json:"outputs,omitempty"` @@ -82,3 +82,5 @@ func NewAudit() *Audit { return r } + +// false diff --git a/typedapi/types/nodereloaderror.go b/typedapi/types/authenticateapikey.go similarity index 63% rename from typedapi/types/nodereloaderror.go rename to typedapi/types/authenticateapikey.go index 511680e00d..d981ae132e 100644 --- a/typedapi/types/nodereloaderror.go +++ b/typedapi/types/authenticateapikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,15 +28,15 @@ import ( "io" ) -// NodeReloadError type. +// AuthenticateApiKey type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/NodeReloadResult.ts#L24-L27 -type NodeReloadError struct { - Name string `json:"name"` - ReloadException *ErrorCause `json:"reload_exception,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/authenticate/SecurityAuthenticateResponse.ts#L44-L47 +type AuthenticateApiKey struct { + Id string `json:"id"` + Name *string `json:"name,omitempty"` } -func (s *NodeReloadError) UnmarshalJSON(data []byte) error { +func (s *AuthenticateApiKey) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,24 +51,26 @@ func (s *NodeReloadError) UnmarshalJSON(data []byte) error { switch t { + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + case "name": if err := dec.Decode(&s.Name); err != nil { return fmt.Errorf("%s | %w", "Name", err) } - case "reload_exception": - if err := dec.Decode(&s.ReloadException); err != nil { - return fmt.Errorf("%s | %w", "ReloadException", err) - } - } } return nil } -// NewNodeReloadError returns a NodeReloadError. -func NewNodeReloadError() *NodeReloadError { - r := &NodeReloadError{} +// NewAuthenticateApiKey returns a AuthenticateApiKey. +func NewAuthenticateApiKey() *AuthenticateApiKey { + r := &AuthenticateApiKey{} return r } + +// false diff --git a/typedapi/types/authenticateduser.go b/typedapi/types/authenticateduser.go index 45cb1562b8..2415c9f0e7 100644 --- a/typedapi/types/authenticateduser.go +++ b/typedapi/types/authenticateduser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AuthenticatedUser type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/types.ts#L40-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/types.ts#L60-L65 type AuthenticatedUser struct { AuthenticationProvider *AuthenticationProvider `json:"authentication_provider,omitempty"` AuthenticationRealm UserRealm `json:"authentication_realm"` @@ -150,3 +150,5 @@ func NewAuthenticatedUser() *AuthenticatedUser { return r } + +// false diff --git a/typedapi/types/authenticatetoken.go b/typedapi/types/authenticatetoken.go index b4e78b01f0..e1f4f5c1e9 100644 --- a/typedapi/types/authenticatetoken.go +++ b/typedapi/types/authenticatetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AuthenticateToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/authenticate/types.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/authenticate/types.ts#L22-L29 type AuthenticateToken struct { Name string `json:"name"` Type *string `json:"type,omitempty"` @@ -80,3 +80,5 @@ func NewAuthenticateToken() *AuthenticateToken { return r } + +// false diff --git a/typedapi/types/authentication.go b/typedapi/types/authentication.go new file mode 100644 index 0000000000..694cab6176 --- /dev/null +++ b/typedapi/types/authentication.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Authentication type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L43-L55 +type Authentication struct { + ApiKey map[string]string `json:"api_key,omitempty"` + AuthenticationRealm AuthenticationRealm `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm AuthenticationRealm `json:"lookup_realm"` + Metadata Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token map[string]string `json:"token,omitempty"` + Username string `json:"username"` +} + +func (s *Authentication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if s.ApiKey == nil { + s.ApiKey = make(map[string]string, 0) + } + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "authentication_realm": + if err := dec.Decode(&s.AuthenticationRealm); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationRealm", err) + } + + case "authentication_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthenticationType = o + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + 
return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = &o + + case "lookup_realm": + if err := dec.Decode(&s.LookupRealm); err != nil { + return fmt.Errorf("%s | %w", "LookupRealm", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "token": + if s.Token == nil { + s.Token = make(map[string]string, 0) + } + if err := dec.Decode(&s.Token); err != nil { + return fmt.Errorf("%s | %w", "Token", err) + } + + case "username": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Username = o + + } + } + return nil +} + +// NewAuthentication returns a Authentication. +func NewAuthentication() *Authentication { + r := &Authentication{ + ApiKey: make(map[string]string), + Token: make(map[string]string), + } + + return r +} + +// false diff --git a/typedapi/types/authenticationprovider.go b/typedapi/types/authenticationprovider.go index 7d9ef6ad6f..0a56a6f4a1 100644 --- a/typedapi/types/authenticationprovider.go +++ b/typedapi/types/authenticationprovider.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AuthenticationProvider type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/types.ts#L55-L58 type AuthenticationProvider struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewAuthenticationProvider() *AuthenticationProvider { return r } + +// false diff --git a/typedapi/types/authenticationrealm.go b/typedapi/types/authenticationrealm.go new file mode 100644 index 0000000000..d28298c3a0 --- /dev/null +++ b/typedapi/types/authenticationrealm.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticationRealm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L57-L61 +type AuthenticationRealm struct { + Domain *string `json:"domain,omitempty"` + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *AuthenticationRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "domain": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Domain", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Domain = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAuthenticationRealm returns a AuthenticationRealm. 
+func NewAuthenticationRealm() *AuthenticationRealm { + r := &AuthenticationRealm{} + + return r +} + +// false diff --git a/typedapi/types/autodatehistogramaggregate.go b/typedapi/types/autodatehistogramaggregate.go index 94d075bfca..12493718ba 100644 --- a/typedapi/types/autodatehistogramaggregate.go +++ b/typedapi/types/autodatehistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AutoDateHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L393-L400 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L393-L400 type AutoDateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` Interval string `json:"interval"` @@ -94,3 +94,5 @@ func NewAutoDateHistogramAggregate() *AutoDateHistogramAggregate { return r } + +// false diff --git a/typedapi/types/autodatehistogramaggregation.go b/typedapi/types/autodatehistogramaggregation.go index 9a1b7fb329..fdab0289f1 100644 --- a/typedapi/types/autodatehistogramaggregation.go +++ b/typedapi/types/autodatehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // AutoDateHistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L72-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L72-L110 type AutoDateHistogramAggregation struct { // Buckets The target number of buckets. Buckets *int `json:"buckets,omitempty"` @@ -154,8 +154,18 @@ func (s *AutoDateHistogramAggregation) UnmarshalJSON(data []byte) error { // NewAutoDateHistogramAggregation returns a AutoDateHistogramAggregation. func NewAutoDateHistogramAggregation() *AutoDateHistogramAggregation { r := &AutoDateHistogramAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type AutoDateHistogramAggregationVariant interface { + AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation +} + +func (s *AutoDateHistogramAggregation) AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation { + return s +} diff --git a/typedapi/types/autofollowedcluster.go b/typedapi/types/autofollowedcluster.go index 7340561533..fe8b6e6f67 100644 --- a/typedapi/types/autofollowedcluster.go +++ b/typedapi/types/autofollowedcluster.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AutoFollowedCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/stats/types.ts.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/stats/types.ts.ts#L26-L30 type AutoFollowedCluster struct { ClusterName string `json:"cluster_name"` LastSeenMetadataVersion int64 `json:"last_seen_metadata_version"` @@ -78,3 +78,5 @@ func NewAutoFollowedCluster() *AutoFollowedCluster { return r } + +// false diff --git a/typedapi/types/autofollowpattern.go b/typedapi/types/autofollowpattern.go index e961a42d84..f4d87c5b9b 100644 --- a/typedapi/types/autofollowpattern.go +++ b/typedapi/types/autofollowpattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AutoFollowPattern type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 type AutoFollowPattern struct { Name string `json:"name"` Pattern AutoFollowPatternSummary `json:"pattern"` @@ -72,3 +72,5 @@ func NewAutoFollowPattern() *AutoFollowPattern { return r } + +// false diff --git a/typedapi/types/autofollowpatternsummary.go b/typedapi/types/autofollowpatternsummary.go index 970830636c..7b6ac4bdcc 100644 --- a/typedapi/types/autofollowpatternsummary.go +++ b/typedapi/types/autofollowpatternsummary.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AutoFollowPatternSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 type AutoFollowPatternSummary struct { Active bool `json:"active"` // FollowIndexPattern The name of follower index. @@ -131,3 +131,5 @@ func NewAutoFollowPatternSummary() *AutoFollowPatternSummary { return r } + +// false diff --git a/typedapi/types/autofollowstats.go b/typedapi/types/autofollowstats.go index 2d1712f5c9..c7ee64a666 100644 --- a/typedapi/types/autofollowstats.go +++ b/typedapi/types/autofollowstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,22 @@ import ( // AutoFollowStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/stats/types.ts.ts#L32-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/stats/types.ts.ts#L32-L47 type AutoFollowStats struct { - AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` - NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` - NumberOfFailedRemoteClusterStateRequests int64 `json:"number_of_failed_remote_cluster_state_requests"` - NumberOfSuccessfulFollowIndices int64 `json:"number_of_successful_follow_indices"` - RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` + AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` + // NumberOfFailedFollowIndices The number of indices that the auto-follow coordinator failed to + // automatically follow. + // The causes of recent failures are captured in the logs of the elected master + // node and in the `auto_follow_stats.recent_auto_follow_errors` field. + NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` + // NumberOfFailedRemoteClusterStateRequests The number of times that the auto-follow coordinator failed to retrieve the + // cluster state from a remote cluster registered in a collection of auto-follow + // patterns. + NumberOfFailedRemoteClusterStateRequests int64 `json:"number_of_failed_remote_cluster_state_requests"` + // NumberOfSuccessfulFollowIndices The number of indices that the auto-follow coordinator successfully followed. + NumberOfSuccessfulFollowIndices int64 `json:"number_of_successful_follow_indices"` + // RecentAutoFollowErrors An array of objects representing failures by the auto-follow coordinator. 
+ RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` } func (s *AutoFollowStats) UnmarshalJSON(data []byte) error { @@ -121,3 +130,5 @@ func NewAutoFollowStats() *AutoFollowStats { return r } + +// false diff --git a/typedapi/types/autoscalingcapacity.go b/typedapi/types/autoscalingcapacity.go index d236feeca5..bb1bc3d878 100644 --- a/typedapi/types/autoscalingcapacity.go +++ b/typedapi/types/autoscalingcapacity.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // AutoscalingCapacity type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 type AutoscalingCapacity struct { Node AutoscalingResources `json:"node"` Total AutoscalingResources `json:"total"` @@ -34,3 +34,5 @@ func NewAutoscalingCapacity() *AutoscalingCapacity { return r } + +// false diff --git a/typedapi/types/autoscalingdecider.go b/typedapi/types/autoscalingdecider.go index f42b43231a..3fde798dbb 100644 --- a/typedapi/types/autoscalingdecider.go +++ b/typedapi/types/autoscalingdecider.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AutoscalingDecider type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 type AutoscalingDecider struct { ReasonDetails json.RawMessage `json:"reason_details,omitempty"` ReasonSummary *string `json:"reason_summary,omitempty"` @@ -86,3 +86,5 @@ func NewAutoscalingDecider() *AutoscalingDecider { return r } + +// false diff --git a/typedapi/types/autoscalingdeciders.go b/typedapi/types/autoscalingdeciders.go index 235df68074..0421792322 100644 --- a/typedapi/types/autoscalingdeciders.go +++ b/typedapi/types/autoscalingdeciders.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // AutoscalingDeciders type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 type AutoscalingDeciders struct { CurrentCapacity AutoscalingCapacity `json:"current_capacity"` CurrentNodes []AutoscalingNode `json:"current_nodes"` @@ -33,8 +33,10 @@ type AutoscalingDeciders struct { // NewAutoscalingDeciders returns a AutoscalingDeciders. func NewAutoscalingDeciders() *AutoscalingDeciders { r := &AutoscalingDeciders{ - Deciders: make(map[string]AutoscalingDecider, 0), + Deciders: make(map[string]AutoscalingDecider), } return r } + +// false diff --git a/typedapi/types/autoscalingnode.go b/typedapi/types/autoscalingnode.go index b60126ef82..d4e617171a 100644 --- a/typedapi/types/autoscalingnode.go +++ b/typedapi/types/autoscalingnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // AutoscalingNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 type AutoscalingNode struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewAutoscalingNode() *AutoscalingNode { return r } + +// false diff --git a/typedapi/types/autoscalingpolicy.go b/typedapi/types/autoscalingpolicy.go index 63627ab2a9..56c47223b6 100644 --- a/typedapi/types/autoscalingpolicy.go +++ b/typedapi/types/autoscalingpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // AutoscalingPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L30 type AutoscalingPolicy struct { // Deciders Decider settings. Deciders map[string]json.RawMessage `json:"deciders"` @@ -36,8 +36,18 @@ type AutoscalingPolicy struct { // NewAutoscalingPolicy returns a AutoscalingPolicy. 
func NewAutoscalingPolicy() *AutoscalingPolicy { r := &AutoscalingPolicy{ - Deciders: make(map[string]json.RawMessage, 0), + Deciders: make(map[string]json.RawMessage), } return r } + +// true + +type AutoscalingPolicyVariant interface { + AutoscalingPolicyCaster() *AutoscalingPolicy +} + +func (s *AutoscalingPolicy) AutoscalingPolicyCaster() *AutoscalingPolicy { + return s +} diff --git a/typedapi/types/autoscalingresources.go b/typedapi/types/autoscalingresources.go index d9bf913475..37e162987f 100644 --- a/typedapi/types/autoscalingresources.go +++ b/typedapi/types/autoscalingresources.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AutoscalingResources type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 type AutoscalingResources struct { Memory int `json:"memory"` Storage int `json:"storage"` @@ -95,3 +95,5 @@ func NewAutoscalingResources() *AutoscalingResources { return r } + +// false diff --git a/typedapi/types/averageaggregation.go b/typedapi/types/averageaggregation.go index 702638365d..73f8721a74 100644 --- a/typedapi/types/averageaggregation.go +++ b/typedapi/types/averageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L55-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L55-L55 type AverageAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewAverageAggregation() *AverageAggregation { return r } + +// true + +type AverageAggregationVariant interface { + AverageAggregationCaster() *AverageAggregation +} + +func (s *AverageAggregation) AverageAggregationCaster() *AverageAggregation { + return s +} diff --git a/typedapi/types/averagebucketaggregation.go b/typedapi/types/averagebucketaggregation.go index 3f8d6969b5..4a8ddd49cc 100644 --- a/typedapi/types/averagebucketaggregation.go +++ b/typedapi/types/averagebucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // AverageBucketAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L78-L81 type AverageBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewAverageBucketAggregation() *AverageBucketAggregation { return r } + +// true + +type AverageBucketAggregationVariant interface { + AverageBucketAggregationCaster() *AverageBucketAggregation +} + +func (s *AverageBucketAggregation) AverageBucketAggregationCaster() *AverageBucketAggregation { + return s +} diff --git a/typedapi/types/avgaggregate.go b/typedapi/types/avgaggregate.go index 38c68d85bc..8ff65399bf 100644 --- a/typedapi/types/avgaggregate.go +++ b/typedapi/types/avgaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // AvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L218-L222 type AvgAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. 
A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewAvgAggregate() *AvgAggregate { return r } + +// false diff --git a/typedapi/types/azurerepository.go b/typedapi/types/azurerepository.go index e90a1b11fd..8d5fc56e57 100644 --- a/typedapi/types/azurerepository.go +++ b/typedapi/types/azurerepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,13 @@ import ( // AzureRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L40-L50 type AzureRepository struct { - Settings AzureRepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Settings The repository settings. + Settings *AzureRepositorySettings `json:"settings,omitempty"` + // Type The Azure repository type. 
+ Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *AzureRepository) UnmarshalJSON(data []byte) error { @@ -92,3 +94,13 @@ func NewAzureRepository() *AzureRepository { return r } + +// true + +type AzureRepositoryVariant interface { + AzureRepositoryCaster() *AzureRepository +} + +func (s *AzureRepository) AzureRepositoryCaster() *AzureRepository { + return s +} diff --git a/typedapi/types/azurerepositorysettings.go b/typedapi/types/azurerepositorysettings.go index 5ad464d326..c435572c45 100644 --- a/typedapi/types/azurerepositorysettings.go +++ b/typedapi/types/azurerepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,17 +31,74 @@ import ( // AzureRepositorySettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L77-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L145-L196 type AzureRepositorySettings struct { - BasePath *string `json:"base_path,omitempty"` - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Client *string `json:"client,omitempty"` - Compress *bool `json:"compress,omitempty"` - Container *string `json:"container,omitempty"` - LocationMode *string `json:"location_mode,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // BasePath The path to the repository data within the container. + // It defaults to the root directory. + // + // NOTE: Don't set `base_path` when configuring a snapshot repository for + // Elastic Cloud Enterprise. 
+ // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments can share the same bucket. + BasePath *string `json:"base_path,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Client The name of the Azure repository client to use. + Client *string `json:"client,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // Container The Azure container. + Container *string `json:"container,omitempty"` + // DeleteObjectsMaxSize The maxmimum batch size, between 1 and 256, used for `BlobBatch` requests. + // Defaults to 256 which is the maximum number supported by the Azure blob batch + // API. + DeleteObjectsMaxSize *int `json:"delete_objects_max_size,omitempty"` + // LocationMode Either `primary_only` or `secondary_only`. + // Note that if you set it to `secondary_only`, it will force `readonly` to + // `true`. + LocationMode *string `json:"location_mode,omitempty"` + // MaxConcurrentBatchDeletes The maximum number of concurrent batch delete requests that will be submitted + // for any individual bulk delete with `BlobBatch`. 
+ // Note that the effective number of concurrent deletes is further limited by + // the Azure client connection and event loop thread limits. + // Defaults to 10, minimum is 1, maximum is 100. + MaxConcurrentBatchDeletes *int `json:"max_concurrent_batch_deletes,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - Readonly *bool `json:"readonly,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. 
+ Readonly *bool `json:"readonly,omitempty"` } func (s *AzureRepositorySettings) UnmarshalJSON(data []byte) error { @@ -114,6 +171,22 @@ func (s *AzureRepositorySettings) UnmarshalJSON(data []byte) error { } s.Container = &o + case "delete_objects_max_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DeleteObjectsMaxSize", err) + } + s.DeleteObjectsMaxSize = &value + case float64: + f := int(v) + s.DeleteObjectsMaxSize = &f + } + case "location_mode": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -126,6 +199,22 @@ func (s *AzureRepositorySettings) UnmarshalJSON(data []byte) error { } s.LocationMode = &o + case "max_concurrent_batch_deletes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxConcurrentBatchDeletes", err) + } + s.MaxConcurrentBatchDeletes = &value + case float64: + f := int(v) + s.MaxConcurrentBatchDeletes = &f + } + case "max_restore_bytes_per_sec": if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) @@ -161,3 +250,13 @@ func NewAzureRepositorySettings() *AzureRepositorySettings { return r } + +// true + +type AzureRepositorySettingsVariant interface { + AzureRepositorySettingsCaster() *AzureRepositorySettings +} + +func (s *AzureRepositorySettings) AzureRepositorySettingsCaster() *AzureRepositorySettings { + return s +} diff --git a/typedapi/types/base.go b/typedapi/types/base.go index 3176bac36e..8629d95e08 100644 --- a/typedapi/types/base.go +++ b/typedapi/types/base.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Base type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L27-L30 type Base struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -91,3 +91,5 @@ func NewBase() *Base { return r } + +// false diff --git a/typedapi/types/basqueanalyzer.go b/typedapi/types/basqueanalyzer.go index c2f5234834..21347283f7 100644 --- a/typedapi/types/basqueanalyzer.go +++ b/typedapi/types/basqueanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BasqueAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L75-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L86-L91 type BasqueAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewBasqueAnalyzer() *BasqueAnalyzer { return r } + +// true + +type BasqueAnalyzerVariant interface { + BasqueAnalyzerCaster() *BasqueAnalyzer +} + +func (s *BasqueAnalyzer) BasqueAnalyzerCaster() *BasqueAnalyzer { + return s +} diff --git a/typedapi/types/bengalianalyzer.go b/typedapi/types/bengalianalyzer.go index eacfad2c66..a31df7021f 100644 --- a/typedapi/types/bengalianalyzer.go +++ b/typedapi/types/bengalianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BengaliAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L82-L87 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L93-L98 type BengaliAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewBengaliAnalyzer() *BengaliAnalyzer { return r } + +// true + +type BengaliAnalyzerVariant interface { + BengaliAnalyzerCaster() *BengaliAnalyzer +} + +func (s *BengaliAnalyzer) BengaliAnalyzerCaster() *BengaliAnalyzer { + return s +} diff --git a/typedapi/types/binaryproperty.go b/typedapi/types/binaryproperty.go index 0a63227385..09a3cc2550 100644 --- a/typedapi/types/binaryproperty.go +++ b/typedapi/types/binaryproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // BinaryProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L54-L56 type BinaryProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,10 +42,11 @@ type BinaryProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *BinaryProperty) UnmarshalJSON(data []byte) error { @@ -116,301 +118,313 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -459,301 +473,313 
@@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := 
NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -773,6 +799,11 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -787,15 +818,16 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { func (s BinaryProperty) MarshalJSON() ([]byte, error) { type innerBinaryProperty BinaryProperty tmp := innerBinaryProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: 
s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "binary" @@ -806,10 +838,20 @@ func (s BinaryProperty) MarshalJSON() ([]byte, error) { // NewBinaryProperty returns a BinaryProperty. func NewBinaryProperty() *BinaryProperty { r := &BinaryProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type BinaryPropertyVariant interface { + BinaryPropertyCaster() *BinaryProperty +} + +func (s *BinaryProperty) BinaryPropertyCaster() *BinaryProperty { + return s +} diff --git a/typedapi/types/blobdetails.go b/typedapi/types/blobdetails.go new file mode 100644 index 0000000000..a04f3b44cb --- /dev/null +++ b/typedapi/types/blobdetails.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BlobDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L250-L284 +type BlobDetails struct { + // Name The name of the blob. + Name string `json:"name"` + // Overwritten Indicates whether the blob was overwritten while the read operations were + // ongoing. + // + // /** + Overwritten bool `json:"overwritten"` + ReadEarly bool `json:"read_early"` + // ReadEnd The position, in bytes, at which read operations completed. + ReadEnd int64 `json:"read_end"` + // ReadStart The position, in bytes, at which read operations started. + ReadStart int64 `json:"read_start"` + // Reads A description of every read operation performed on the blob. + Reads ReadBlobDetails `json:"reads"` + // Size The size of the blob. + Size ByteSize `json:"size"` + // SizeBytes The size of the blob in bytes. 
+ SizeBytes int64 `json:"size_bytes"` +} + +func (s *BlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "overwritten": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overwritten", err) + } + s.Overwritten = value + case bool: + s.Overwritten = v + } + + case "read_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEarly", err) + } + s.ReadEarly = value + case bool: + s.ReadEarly = v + } + + case "read_end": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEnd", err) + } + s.ReadEnd = value + case float64: + f := int64(v) + s.ReadEnd = f + } + + case "read_start": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadStart", err) + } + s.ReadStart = value + case float64: + f := int64(v) + s.ReadStart = f + } + + case "reads": + if err := dec.Decode(&s.Reads); err != nil { + return fmt.Errorf("%s | %w", "Reads", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err 
!= nil { + return fmt.Errorf("%s | %w", "SizeBytes", err) + } + s.SizeBytes = value + case float64: + f := int64(v) + s.SizeBytes = f + } + + } + } + return nil +} + +// NewBlobDetails returns a BlobDetails. +func NewBlobDetails() *BlobDetails { + r := &BlobDetails{} + + return r +} + +// false diff --git a/typedapi/types/booleanproperty.go b/typedapi/types/booleanproperty.go index 696c358169..df16dc8b54 100644 --- a/typedapi/types/booleanproperty.go +++ b/typedapi/types/booleanproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // BooleanProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L58-L64 type BooleanProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -44,11 +45,12 @@ type BooleanProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *bool `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *bool `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *BooleanProperty) UnmarshalJSON(data []byte) error { @@ -141,301 +143,313 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -512,301 +526,313 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo 
:= NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := 
NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo 
:= NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -826,6 +852,11 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -840,19 +871,20 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { func (s BooleanProperty) MarshalJSON() ([]byte, error) { type innerBooleanProperty BooleanProperty tmp := innerBooleanProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fielddata: s.Fielddata, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "boolean" @@ -863,10 +895,20 @@ func (s BooleanProperty) MarshalJSON() ([]byte, error) { // NewBooleanProperty returns a 
BooleanProperty. func NewBooleanProperty() *BooleanProperty { r := &BooleanProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type BooleanPropertyVariant interface { + BooleanPropertyCaster() *BooleanProperty +} + +func (s *BooleanProperty) BooleanPropertyCaster() *BooleanProperty { + return s +} diff --git a/typedapi/types/boolquery.go b/typedapi/types/boolquery.go index 13e6627bfb..d2258f2b78 100644 --- a/typedapi/types/boolquery.go +++ b/typedapi/types/boolquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BoolQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L29-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L29-L56 type BoolQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -179,3 +179,13 @@ func NewBoolQuery() *BoolQuery { return r } + +// true + +type BoolQueryVariant interface { + BoolQueryCaster() *BoolQuery +} + +func (s *BoolQuery) BoolQueryCaster() *BoolQuery { + return s +} diff --git a/typedapi/types/boostingquery.go b/typedapi/types/boostingquery.go index b90d706f15..116fa3b52f 100644 --- a/typedapi/types/boostingquery.go +++ b/typedapi/types/boostingquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BoostingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L58-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L58-L74 type BoostingQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -40,12 +40,12 @@ type BoostingQuery struct { // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` // Negative Query used to decrease the relevance score of matching documents. - Negative *Query `json:"negative,omitempty"` + Negative Query `json:"negative"` // NegativeBoost Floating point number between 0 and 1.0 used to decrease the relevance scores // of documents matching the `negative` query. NegativeBoost Float64 `json:"negative_boost"` // Positive Any returned documents must match this query. - Positive *Query `json:"positive,omitempty"` + Positive Query `json:"positive"` QueryName_ *string `json:"_name,omitempty"` } @@ -129,3 +129,13 @@ func NewBoostingQuery() *BoostingQuery { return r } + +// true + +type BoostingQueryVariant interface { + BoostingQueryCaster() *BoostingQuery +} + +func (s *BoostingQuery) BoostingQueryCaster() *BoostingQuery { + return s +} diff --git a/typedapi/types/boxplotaggregate.go b/typedapi/types/boxplotaggregate.go index 1cfee35a71..b75be2e185 100644 --- a/typedapi/types/boxplotaggregate.go +++ b/typedapi/types/boxplotaggregate.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BoxPlotAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L806-L825 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L806-L825 type BoxPlotAggregate struct { Lower Float64 `json:"lower"` LowerAsString *string `json:"lower_as_string,omitempty"` @@ -277,3 +277,5 @@ func NewBoxPlotAggregate() *BoxPlotAggregate { return r } + +// false diff --git a/typedapi/types/boxplotaggregation.go b/typedapi/types/boxplotaggregation.go index 23c46a6cf9..083bed4738 100644 --- a/typedapi/types/boxplotaggregation.go +++ b/typedapi/types/boxplotaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BoxplotAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L57-L62 type BoxplotAggregation struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -102,3 +102,13 @@ func NewBoxplotAggregation() *BoxplotAggregation { return r } + +// true + +type BoxplotAggregationVariant interface { + BoxplotAggregationCaster() *BoxplotAggregation +} + +func (s *BoxplotAggregation) BoxplotAggregationCaster() *BoxplotAggregation { + return s +} diff --git a/typedapi/types/braziliananalyzer.go b/typedapi/types/braziliananalyzer.go index 05e3d01cab..a7f7aa190f 100644 --- a/typedapi/types/braziliananalyzer.go +++ b/typedapi/types/braziliananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BrazilianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L89-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L100-L104 type BrazilianAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewBrazilianAnalyzer() *BrazilianAnalyzer { return r } + +// true + +type BrazilianAnalyzerVariant interface { + BrazilianAnalyzerCaster() *BrazilianAnalyzer +} + +func (s *BrazilianAnalyzer) BrazilianAnalyzerCaster() *BrazilianAnalyzer { + return s +} diff --git a/typedapi/types/breaker.go b/typedapi/types/breaker.go index dc74f23c02..1a1c2c9956 100644 --- a/typedapi/types/breaker.go +++ b/typedapi/types/breaker.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Breaker type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L470-L495 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L470-L495 type Breaker struct { // EstimatedSize Estimated memory used for the operation. 
EstimatedSize *string `json:"estimated_size,omitempty"` @@ -161,3 +161,5 @@ func NewBreaker() *Breaker { return r } + +// false diff --git a/typedapi/types/bucketcorrelationaggregation.go b/typedapi/types/bucketcorrelationaggregation.go index 09a4891685..995c83ee43 100644 --- a/typedapi/types/bucketcorrelationaggregation.go +++ b/typedapi/types/bucketcorrelationaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // BucketCorrelationAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L139-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L139-L146 type BucketCorrelationAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -74,3 +74,13 @@ func NewBucketCorrelationAggregation() *BucketCorrelationAggregation { return r } + +// true + +type BucketCorrelationAggregationVariant interface { + BucketCorrelationAggregationCaster() *BucketCorrelationAggregation +} + +func (s *BucketCorrelationAggregation) BucketCorrelationAggregationCaster() *BucketCorrelationAggregation { + return s +} diff --git a/typedapi/types/bucketcorrelationfunction.go b/typedapi/types/bucketcorrelationfunction.go index 9e50e4fca9..5dea342094 100644 --- a/typedapi/types/bucketcorrelationfunction.go +++ b/typedapi/types/bucketcorrelationfunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // BucketCorrelationFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L148-L153 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L148-L153 type BucketCorrelationFunction struct { // CountCorrelation The configuration to calculate a count correlation. This function is designed // for determining the correlation of a term value and a given metric. @@ -35,3 +35,13 @@ func NewBucketCorrelationFunction() *BucketCorrelationFunction { return r } + +// true + +type BucketCorrelationFunctionVariant interface { + BucketCorrelationFunctionCaster() *BucketCorrelationFunction +} + +func (s *BucketCorrelationFunction) BucketCorrelationFunctionCaster() *BucketCorrelationFunction { + return s +} diff --git a/typedapi/types/bucketcorrelationfunctioncountcorrelation.go b/typedapi/types/bucketcorrelationfunctioncountcorrelation.go index 5ff6b53be0..125de4166c 100644 --- a/typedapi/types/bucketcorrelationfunctioncountcorrelation.go +++ b/typedapi/types/bucketcorrelationfunctioncountcorrelation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // BucketCorrelationFunctionCountCorrelation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L155-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L155-L158 type BucketCorrelationFunctionCountCorrelation struct { // Indicator The indicator with which to correlate the configured `bucket_path` values. Indicator BucketCorrelationFunctionCountCorrelationIndicator `json:"indicator"` @@ -34,3 +34,13 @@ func NewBucketCorrelationFunctionCountCorrelation() *BucketCorrelationFunctionCo return r } + +// true + +type BucketCorrelationFunctionCountCorrelationVariant interface { + BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation +} + +func (s *BucketCorrelationFunctionCountCorrelation) BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation { + return s +} diff --git a/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go b/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go index 99fa31b485..83deee0270 100644 --- a/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go +++ b/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BucketCorrelationFunctionCountCorrelationIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L160-L178 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L160-L178 type BucketCorrelationFunctionCountCorrelationIndicator struct { // DocCount The total number of documents that initially created the expectations. It’s // required to be greater @@ -104,3 +104,13 @@ func NewBucketCorrelationFunctionCountCorrelationIndicator() *BucketCorrelationF return r } + +// true + +type BucketCorrelationFunctionCountCorrelationIndicatorVariant interface { + BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator +} + +func (s *BucketCorrelationFunctionCountCorrelationIndicator) BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator { + return s +} diff --git a/typedapi/types/bucketinfluencer.go b/typedapi/types/bucketinfluencer.go index 77280dd9b8..73917eeab0 100644 --- a/typedapi/types/bucketinfluencer.go +++ b/typedapi/types/bucketinfluencer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BucketInfluencer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Bucket.ts#L79-L127 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Bucket.ts#L79-L127 type BucketInfluencer struct { // AnomalyScore A normalized score between 0-100, which is calculated for each bucket // influencer. This score might be updated as @@ -208,3 +208,5 @@ func NewBucketInfluencer() *BucketInfluencer { return r } + +// false diff --git a/typedapi/types/bucketksaggregation.go b/typedapi/types/bucketksaggregation.go index 23b5ec50bb..cc5dbccd31 100644 --- a/typedapi/types/bucketksaggregation.go +++ b/typedapi/types/bucketksaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BucketKsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L103-L137 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L103-L137 type BucketKsAggregation struct { // Alternative A list of string values indicating which K-S test alternative to calculate. 
// The valid values @@ -115,3 +115,13 @@ func NewBucketKsAggregation() *BucketKsAggregation { return r } + +// true + +type BucketKsAggregationVariant interface { + BucketKsAggregationCaster() *BucketKsAggregation +} + +func (s *BucketKsAggregation) BucketKsAggregationCaster() *BucketKsAggregation { + return s +} diff --git a/typedapi/types/bucketmetricvalueaggregate.go b/typedapi/types/bucketmetricvalueaggregate.go index efba970e7f..5881740c84 100644 --- a/typedapi/types/bucketmetricvalueaggregate.go +++ b/typedapi/types/bucketmetricvalueaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BucketMetricValueAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L250-L253 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L250-L253 type BucketMetricValueAggregate struct { Keys []string `json:"keys"` Meta Metadata `json:"meta,omitempty"` @@ -95,3 +95,5 @@ func NewBucketMetricValueAggregate() *BucketMetricValueAggregate { return r } + +// false diff --git a/typedapi/types/bucketsadjacencymatrixbucket.go b/typedapi/types/bucketsadjacencymatrixbucket.go index c70b959b82..7f7a453a69 100644 --- a/typedapi/types/bucketsadjacencymatrixbucket.go +++ b/typedapi/types/bucketsadjacencymatrixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]AdjacencyMatrixBucket // []AdjacencyMatrixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsAdjacencyMatrixBucket any diff --git a/typedapi/types/bucketsapikeyquerycontainer.go b/typedapi/types/bucketsapikeyquerycontainer.go index b6e23a2797..647db0ec6f 100644 --- a/typedapi/types/bucketsapikeyquerycontainer.go +++ b/typedapi/types/bucketsapikeyquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // map[string]ApiKeyQueryContainer // []ApiKeyQueryContainer // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsApiKeyQueryContainer any + +type BucketsApiKeyQueryContainerVariant interface { + BucketsApiKeyQueryContainerCaster() *BucketsApiKeyQueryContainer +} diff --git a/typedapi/types/bucketscompositebucket.go b/typedapi/types/bucketscompositebucket.go index e39231ee81..1bbc17b46e 100644 --- a/typedapi/types/bucketscompositebucket.go +++ b/typedapi/types/bucketscompositebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]CompositeBucket // []CompositeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsCompositeBucket any diff --git a/typedapi/types/bucketscriptaggregation.go b/typedapi/types/bucketscriptaggregation.go index 62ba1a921a..8411512f47 100644 --- a/typedapi/types/bucketscriptaggregation.go +++ b/typedapi/types/bucketscriptaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // BucketScriptAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L83-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L83-L91 type BucketScriptAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewBucketScriptAggregation() *BucketScriptAggregation { return r } + +// true + +type BucketScriptAggregationVariant interface { + BucketScriptAggregationCaster() *BucketScriptAggregation +} + +func (s *BucketScriptAggregation) BucketScriptAggregationCaster() *BucketScriptAggregation { + return s +} diff --git a/typedapi/types/bucketsdatehistogrambucket.go b/typedapi/types/bucketsdatehistogrambucket.go index 52e5e7bffc..8ff6a88277 100644 --- a/typedapi/types/bucketsdatehistogrambucket.go +++ b/typedapi/types/bucketsdatehistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]DateHistogramBucket // []DateHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsDateHistogramBucket any diff --git a/typedapi/types/bucketsdoubletermsbucket.go b/typedapi/types/bucketsdoubletermsbucket.go index b4fd6d73c3..f31b90ddb2 100644 --- a/typedapi/types/bucketsdoubletermsbucket.go +++ b/typedapi/types/bucketsdoubletermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]DoubleTermsBucket // []DoubleTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsDoubleTermsBucket any diff --git a/typedapi/types/bucketselectoraggregation.go b/typedapi/types/bucketselectoraggregation.go index eada4d66c7..6ed706cfa4 100644 --- a/typedapi/types/bucketselectoraggregation.go +++ b/typedapi/types/bucketselectoraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // BucketSelectorAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L93-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L93-L101 type BucketSelectorAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewBucketSelectorAggregation() *BucketSelectorAggregation { return r } + +// true + +type BucketSelectorAggregationVariant interface { + BucketSelectorAggregationCaster() *BucketSelectorAggregation +} + +func (s *BucketSelectorAggregation) BucketSelectorAggregationCaster() *BucketSelectorAggregation { + return s +} diff --git a/typedapi/types/bucketsfiltersbucket.go b/typedapi/types/bucketsfiltersbucket.go index 6e29f2a42f..58c36edc51 100644 --- a/typedapi/types/bucketsfiltersbucket.go +++ b/typedapi/types/bucketsfiltersbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]FiltersBucket // []FiltersBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsFiltersBucket any diff --git a/typedapi/types/bucketsfrequentitemsetsbucket.go b/typedapi/types/bucketsfrequentitemsetsbucket.go index 6bcee643ea..7c566d8d19 100644 --- a/typedapi/types/bucketsfrequentitemsetsbucket.go +++ b/typedapi/types/bucketsfrequentitemsetsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]FrequentItemSetsBucket // []FrequentItemSetsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsFrequentItemSetsBucket any diff --git a/typedapi/types/bucketsgeohashgridbucket.go b/typedapi/types/bucketsgeohashgridbucket.go index 7e4fa8286a..31bb5395dc 100644 --- a/typedapi/types/bucketsgeohashgridbucket.go +++ b/typedapi/types/bucketsgeohashgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]GeoHashGridBucket // []GeoHashGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoHashGridBucket any diff --git a/typedapi/types/bucketsgeohexgridbucket.go b/typedapi/types/bucketsgeohexgridbucket.go index 4984002b66..1e5453cd7c 100644 --- a/typedapi/types/bucketsgeohexgridbucket.go +++ b/typedapi/types/bucketsgeohexgridbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]GeoHexGridBucket // []GeoHexGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoHexGridBucket any diff --git a/typedapi/types/bucketsgeotilegridbucket.go b/typedapi/types/bucketsgeotilegridbucket.go index 650ad6c54e..64d4fd1dc2 100644 --- a/typedapi/types/bucketsgeotilegridbucket.go +++ b/typedapi/types/bucketsgeotilegridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]GeoTileGridBucket // []GeoTileGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoTileGridBucket any diff --git a/typedapi/types/bucketshistogrambucket.go b/typedapi/types/bucketshistogrambucket.go index f4a57fa4bf..00628fae36 100644 --- a/typedapi/types/bucketshistogrambucket.go +++ b/typedapi/types/bucketshistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]HistogramBucket // []HistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsHistogramBucket any diff --git a/typedapi/types/bucketsipprefixbucket.go b/typedapi/types/bucketsipprefixbucket.go index ba8d8990f6..1b3b2c9320 100644 --- a/typedapi/types/bucketsipprefixbucket.go +++ b/typedapi/types/bucketsipprefixbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]IpPrefixBucket // []IpPrefixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsIpPrefixBucket any diff --git a/typedapi/types/bucketsiprangebucket.go b/typedapi/types/bucketsiprangebucket.go index 49de816d5b..425fc1b4f2 100644 --- a/typedapi/types/bucketsiprangebucket.go +++ b/typedapi/types/bucketsiprangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]IpRangeBucket // []IpRangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsIpRangeBucket any diff --git a/typedapi/types/bucketslongraretermsbucket.go b/typedapi/types/bucketslongraretermsbucket.go index 176c8f68a6..07ad6a8204 100644 --- a/typedapi/types/bucketslongraretermsbucket.go +++ b/typedapi/types/bucketslongraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]LongRareTermsBucket // []LongRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsLongRareTermsBucket any diff --git a/typedapi/types/bucketslongtermsbucket.go b/typedapi/types/bucketslongtermsbucket.go index 774c8cc55f..7da07c993d 100644 --- a/typedapi/types/bucketslongtermsbucket.go +++ b/typedapi/types/bucketslongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]LongTermsBucket // []LongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsLongTermsBucket any diff --git a/typedapi/types/bucketsmultitermsbucket.go b/typedapi/types/bucketsmultitermsbucket.go index fa68debe43..2dffaddb0c 100644 --- a/typedapi/types/bucketsmultitermsbucket.go +++ b/typedapi/types/bucketsmultitermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]MultiTermsBucket // []MultiTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsMultiTermsBucket any diff --git a/typedapi/types/bucketsortaggregation.go b/typedapi/types/bucketsortaggregation.go index 358857f296..cfc9c509ea 100644 --- a/typedapi/types/bucketsortaggregation.go +++ b/typedapi/types/bucketsortaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // BucketSortAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L180-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L180-L204 type BucketSortAggregation struct { // From Buckets in positions prior to `from` will be truncated. 
From *int `json:"from,omitempty"` @@ -125,3 +125,13 @@ func NewBucketSortAggregation() *BucketSortAggregation { return r } + +// true + +type BucketSortAggregationVariant interface { + BucketSortAggregationCaster() *BucketSortAggregation +} + +func (s *BucketSortAggregation) BucketSortAggregationCaster() *BucketSortAggregation { + return s +} diff --git a/typedapi/types/bucketspath.go b/typedapi/types/bucketspath.go index bf8b42028e..c35d4a9b78 100644 --- a/typedapi/types/bucketspath.go +++ b/typedapi/types/bucketspath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ package types // []string // map[string]string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L53-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L53-L59 type BucketsPath any + +type BucketsPathVariant interface { + BucketsPathCaster() *BucketsPath +} diff --git a/typedapi/types/bucketsquery.go b/typedapi/types/bucketsquery.go index bc4b66d031..e7c72958ef 100644 --- a/typedapi/types/bucketsquery.go +++ b/typedapi/types/bucketsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // map[string]Query // []Query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsQuery any + +type BucketsQueryVariant interface { + BucketsQueryCaster() *BucketsQuery +} diff --git a/typedapi/types/bucketsrangebucket.go b/typedapi/types/bucketsrangebucket.go index 3d73809158..47b36c99cc 100644 --- a/typedapi/types/bucketsrangebucket.go +++ b/typedapi/types/bucketsrangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]RangeBucket // []RangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsRangeBucket any diff --git a/typedapi/types/bucketssignificantlongtermsbucket.go b/typedapi/types/bucketssignificantlongtermsbucket.go index d43ef57925..707b7387df 100644 --- a/typedapi/types/bucketssignificantlongtermsbucket.go +++ b/typedapi/types/bucketssignificantlongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]SignificantLongTermsBucket // []SignificantLongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsSignificantLongTermsBucket any diff --git a/typedapi/types/bucketssignificantstringtermsbucket.go b/typedapi/types/bucketssignificantstringtermsbucket.go index 30999598e5..52ce0c490a 100644 --- a/typedapi/types/bucketssignificantstringtermsbucket.go +++ b/typedapi/types/bucketssignificantstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]SignificantStringTermsBucket // []SignificantStringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsSignificantStringTermsBucket any diff --git a/typedapi/types/bucketsstringraretermsbucket.go b/typedapi/types/bucketsstringraretermsbucket.go index 6bc2de2139..aee10efe5a 100644 --- a/typedapi/types/bucketsstringraretermsbucket.go +++ b/typedapi/types/bucketsstringraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]StringRareTermsBucket // []StringRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsStringRareTermsBucket any diff --git a/typedapi/types/bucketsstringtermsbucket.go b/typedapi/types/bucketsstringtermsbucket.go index 19ef55e6e5..8587197d35 100644 --- a/typedapi/types/bucketsstringtermsbucket.go +++ b/typedapi/types/bucketsstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]StringTermsBucket // []StringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsStringTermsBucket any diff --git a/typedapi/types/bucketstimeseriesbucket.go b/typedapi/types/bucketstimeseriesbucket.go index 853cf656e9..cb60e3fbde 100644 --- a/typedapi/types/bucketstimeseriesbucket.go +++ b/typedapi/types/bucketstimeseriesbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]TimeSeriesBucket // []TimeSeriesBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsTimeSeriesBucket any diff --git a/typedapi/types/bucketsummary.go b/typedapi/types/bucketsummary.go index 4287d1f749..cf436371d3 100644 --- a/typedapi/types/bucketsummary.go +++ b/typedapi/types/bucketsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BucketSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Bucket.ts#L30-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Bucket.ts#L30-L77 type BucketSummary struct { // AnomalyScore The maximum anomaly score, between 0-100, for any of the bucket influencers. 
// This is an overall, rate-limited @@ -198,3 +198,5 @@ func NewBucketSummary() *BucketSummary { return r } + +// false diff --git a/typedapi/types/bucketsvariablewidthhistogrambucket.go b/typedapi/types/bucketsvariablewidthhistogrambucket.go index d978b20a5d..438f0701b6 100644 --- a/typedapi/types/bucketsvariablewidthhistogrambucket.go +++ b/typedapi/types/bucketsvariablewidthhistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]VariableWidthHistogramBucket // []VariableWidthHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsVariableWidthHistogramBucket any diff --git a/typedapi/types/bucketsvoid.go b/typedapi/types/bucketsvoid.go index 582b362b95..025a5050f4 100644 --- a/typedapi/types/bucketsvoid.go +++ b/typedapi/types/bucketsvoid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // map[string]any // []any // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L346-L355 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsVoid any diff --git a/typedapi/types/buildinformation.go b/typedapi/types/buildinformation.go index f1b9f0312c..dcae2c6261 100644 --- a/typedapi/types/buildinformation.go +++ b/typedapi/types/buildinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BuildInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/types.ts#L24-L27 type BuildInformation struct { Date DateTime `json:"date"` Hash string `json:"hash"` @@ -80,3 +80,5 @@ func NewBuildInformation() *BuildInformation { return r } + +// false diff --git a/typedapi/types/bulgariananalyzer.go b/typedapi/types/bulgariananalyzer.go index 0e0884a070..147837427a 100644 --- a/typedapi/types/bulgariananalyzer.go +++ b/typedapi/types/bulgariananalyzer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BulgarianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L95-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L106-L111 type BulgarianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewBulgarianAnalyzer() *BulgarianAnalyzer { return r } + +// true + +type BulgarianAnalyzerVariant interface { + BulgarianAnalyzerCaster() *BulgarianAnalyzer +} + +func (s *BulgarianAnalyzer) BulgarianAnalyzerCaster() *BulgarianAnalyzer { + return s +} diff --git a/typedapi/types/bulkerror.go b/typedapi/types/bulkerror.go index f7b7b113e4..80642c2b80 100644 --- a/typedapi/types/bulkerror.go +++ b/typedapi/types/bulkerror.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BulkError type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Bulk.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Bulk.ts#L24-L33 type BulkError struct { // Count The number of errors Count int `json:"count"` @@ -86,8 +86,10 @@ func (s *BulkError) UnmarshalJSON(data []byte) error { // NewBulkError returns a BulkError. func NewBulkError() *BulkError { r := &BulkError{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/bulkindexbyscrollfailure.go b/typedapi/types/bulkindexbyscrollfailure.go index 1a23162ed2..898c76a0d7 100644 --- a/typedapi/types/bulkindexbyscrollfailure.go +++ b/typedapi/types/bulkindexbyscrollfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,12 @@ import ( // BulkIndexByScrollFailure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Errors.ts#L60-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Errors.ts#L60-L65 type BulkIndexByScrollFailure struct { Cause ErrorCause `json:"cause"` Id string `json:"id"` Index string `json:"index"` Status int `json:"status"` - Type string `json:"type"` } func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { @@ -86,18 +85,6 @@ func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { s.Status = f } - case "type": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Type", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Type = o - } } return nil @@ -109,3 +96,5 @@ func NewBulkIndexByScrollFailure() *BulkIndexByScrollFailure { return r } + +// false diff --git a/typedapi/types/bulkstats.go b/typedapi/types/bulkstats.go index ed116ac394..fa3597ed34 100644 --- a/typedapi/types/bulkstats.go +++ b/typedapi/types/bulkstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BulkStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L68-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L71-L81 type BulkStats struct { AvgSize ByteSize `json:"avg_size,omitempty"` AvgSizeInBytes int64 `json:"avg_size_in_bytes"` @@ -145,3 +145,5 @@ func NewBulkStats() *BulkStats { return r } + +// false diff --git a/typedapi/types/bytenumberproperty.go b/typedapi/types/bytenumberproperty.go index f8aa22be70..bc8525edf7 100644 --- a/typedapi/types/bytenumberproperty.go +++ b/typedapi/types/bytenumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ByteNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L172-L175 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L176-L179 type ByteNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type ByteNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *byte `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *byte `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -542,301 +556,313 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -861,6 +887,11 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -909,6 +940,7 @@ func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -922,10 +954,20 @@ func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { // NewByteNumberProperty returns a ByteNumberProperty. func NewByteNumberProperty() *ByteNumberProperty { r := &ByteNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ByteNumberPropertyVariant interface { + ByteNumberPropertyCaster() *ByteNumberProperty +} + +func (s *ByteNumberProperty) ByteNumberPropertyCaster() *ByteNumberProperty { + return s +} diff --git a/typedapi/types/bytesize.go b/typedapi/types/bytesize.go index fb8dbced98..6cb46f6263 100644 --- a/typedapi/types/bytesize.go +++ b/typedapi/types/bytesize.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L97-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L90-L91 type ByteSize any + +type ByteSizeVariant interface { + ByteSizeCaster() *ByteSize +} diff --git a/typedapi/types/bytesprocessor.go b/typedapi/types/bytesprocessor.go index 806e8fc808..5e9d02e5f1 100644 --- a/typedapi/types/bytesprocessor.go +++ b/typedapi/types/bytesprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // BytesProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L515-L531 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L556-L572 type BytesProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type BytesProcessor struct { // Field The field to convert. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -88,16 +88,9 @@ func (s *BytesProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewBytesProcessor() *BytesProcessor { return r } + +// true + +type BytesProcessorVariant interface { + BytesProcessorCaster() *BytesProcessor +} + +func (s *BytesProcessor) BytesProcessorCaster() *BytesProcessor { + return s +} diff --git a/typedapi/types/cachequeries.go b/typedapi/types/cachequeries.go index 0e0cf652a5..092bc68d08 100644 --- a/typedapi/types/cachequeries.go +++ b/typedapi/types/cachequeries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CacheQueries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L407-L409 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L421-L423 type CacheQueries struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewCacheQueries() *CacheQueries { return r } + +// true + +type CacheQueriesVariant interface { + CacheQueriesCaster() *CacheQueries +} + +func (s *CacheQueries) CacheQueriesCaster() *CacheQueries { + return s +} diff --git a/typedapi/types/cachestats.go b/typedapi/types/cachestats.go index 407cd5fdaf..cf46c7edeb 100644 --- a/typedapi/types/cachestats.go +++ b/typedapi/types/cachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/stats/types.ts#L38-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/stats/types.ts#L38-L50 type CacheStats struct { Count int `json:"count"` Evictions int `json:"evictions"` @@ -163,3 +163,5 @@ func NewCacheStats() *CacheStats { return r } + +// false diff --git a/typedapi/types/calendar.go b/typedapi/types/calendar.go index b04eca2834..26aefee002 100644 --- a/typedapi/types/calendar.go +++ b/typedapi/types/calendar.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Calendar type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_calendars/types.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_calendars/types.ts#L22-L29 type Calendar struct { // CalendarId A string that uniquely identifies a calendar. CalendarId string `json:"calendar_id"` @@ -89,3 +89,5 @@ func NewCalendar() *Calendar { return r } + +// false diff --git a/typedapi/types/calendarevent.go b/typedapi/types/calendarevent.go index 7d8446c00b..e317e1b2a3 100644 --- a/typedapi/types/calendarevent.go +++ b/typedapi/types/calendarevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CalendarEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/CalendarEvent.ts#L24-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/CalendarEvent.ts#L24-L44 type CalendarEvent struct { // CalendarId A string that uniquely identifies a calendar. 
CalendarId *string `json:"calendar_id,omitempty"` @@ -155,3 +155,13 @@ func NewCalendarEvent() *CalendarEvent { return r } + +// true + +type CalendarEventVariant interface { + CalendarEventCaster() *CalendarEvent +} + +func (s *CalendarEvent) CalendarEventCaster() *CalendarEvent { + return s +} diff --git a/typedapi/types/cardinalityaggregate.go b/typedapi/types/cardinalityaggregate.go index a4255dda92..d08f4d3935 100644 --- a/typedapi/types/cardinalityaggregate.go +++ b/typedapi/types/cardinalityaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CardinalityAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L140-L143 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L140-L143 type CardinalityAggregate struct { Meta Metadata `json:"meta,omitempty"` Value int64 `json:"value"` @@ -83,3 +83,5 @@ func NewCardinalityAggregate() *CardinalityAggregate { return r } + +// false diff --git a/typedapi/types/cardinalityaggregation.go b/typedapi/types/cardinalityaggregation.go index 1fde3bc050..fdbd7b334c 100644 --- a/typedapi/types/cardinalityaggregation.go +++ b/typedapi/types/cardinalityaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L87-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L87-L99 type CardinalityAggregation struct { // ExecutionHint Mechanism by which cardinality aggregations is run. ExecutionHint *cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint,omitempty"` @@ -125,3 +125,13 @@ func NewCardinalityAggregation() *CardinalityAggregation { return r } + +// true + +type CardinalityAggregationVariant interface { + CardinalityAggregationCaster() *CardinalityAggregation +} + +func (s *CardinalityAggregation) CardinalityAggregationCaster() *CardinalityAggregation { + return s +} diff --git a/typedapi/types/catalananalyzer.go b/typedapi/types/catalananalyzer.go index e4a4f8b9e2..896f5617df 100644 --- a/typedapi/types/catalananalyzer.go +++ b/typedapi/types/catalananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CatalanAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L102-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L113-L118 type CatalanAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewCatalanAnalyzer() *CatalanAnalyzer { return r } + +// true + +type CatalanAnalyzerVariant interface { + CatalanAnalyzerCaster() *CatalanAnalyzer +} + +func (s *CatalanAnalyzer) CatalanAnalyzerCaster() *CatalanAnalyzer { + return s +} diff --git a/typedapi/types/catanonalydetectorcolumns.go b/typedapi/types/catanonalydetectorcolumns.go index 4f94b597f9..39e22bffc2 100644 --- a/typedapi/types/catanonalydetectorcolumns.go +++ b/typedapi/types/catanonalydetectorcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // CatAnonalyDetectorColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L402-L404 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L402-L404 type CatAnonalyDetectorColumns []catanomalydetectorcolumn.CatAnomalyDetectorColumn diff --git a/typedapi/types/catcomponenttemplate.go b/typedapi/types/catcomponenttemplate.go index 501dfab5a6..e3637b5bcd 100644 --- a/typedapi/types/catcomponenttemplate.go +++ b/typedapi/types/catcomponenttemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,15 @@ import ( // CatComponentTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/component_templates/types.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/component_templates/types.ts#L20-L28 type CatComponentTemplate struct { - AliasCount string `json:"alias_count"` - IncludedIn string `json:"included_in"` - MappingCount string `json:"mapping_count"` - MetadataCount string `json:"metadata_count"` - Name string `json:"name"` - SettingsCount string `json:"settings_count"` - Version string `json:"version"` + AliasCount string `json:"alias_count"` + IncludedIn string `json:"included_in"` + MappingCount string `json:"mapping_count"` + MetadataCount string `json:"metadata_count"` + Name string `json:"name"` + SettingsCount string `json:"settings_count"` + Version *string `json:"version,omitempty"` } func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error 
{ @@ -139,7 +139,7 @@ func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Version = o + s.Version = &o } } @@ -152,3 +152,5 @@ func NewCatComponentTemplate() *CatComponentTemplate { return r } + +// false diff --git a/typedapi/types/catdatafeedcolumns.go b/typedapi/types/catdatafeedcolumns.go index a6610ff9c1..f26bed7197 100644 --- a/typedapi/types/catdatafeedcolumns.go +++ b/typedapi/types/catdatafeedcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // CatDatafeedColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L559-L559 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L559-L559 type CatDatafeedColumns []catdatafeedcolumn.CatDatafeedColumn diff --git a/typedapi/types/catdfacolumns.go b/typedapi/types/catdfacolumns.go index 1f9fdc1343..02afff67bf 100644 --- a/typedapi/types/catdfacolumns.go +++ b/typedapi/types/catdfacolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // CatDfaColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L558-L558 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L558-L558 type CatDfaColumns []catdfacolumn.CatDfaColumn diff --git a/typedapi/types/categorizationanalyzer.go b/typedapi/types/categorizationanalyzer.go index 289cf34d0f..8fdc6d7b55 100644 --- a/typedapi/types/categorizationanalyzer.go +++ b/typedapi/types/categorizationanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // CategorizationAnalyzerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L181-L182 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L181-L182 type CategorizationAnalyzer any + +type CategorizationAnalyzerVariant interface { + CategorizationAnalyzerCaster() *CategorizationAnalyzer +} diff --git a/typedapi/types/categorizationanalyzerdefinition.go b/typedapi/types/categorizationanalyzerdefinition.go index 0f606ce14d..cba8a23a5c 100644 --- a/typedapi/types/categorizationanalyzerdefinition.go +++ b/typedapi/types/categorizationanalyzerdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -24,12 +24,13 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" ) // CategorizationAnalyzerDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L184-L198 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L184-L198 type CategorizationAnalyzerDefinition struct { // CharFilter One or more character filters. In addition to the built-in character filters, // other plugins can provide more character filters. If this property is not @@ -91,37 +92,37 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "html_strip": o := NewHtmlStripCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "html_strip", err) } s.CharFilter = append(s.CharFilter, *o) case "mapping": o := NewMappingCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "mapping", err) } s.CharFilter = append(s.CharFilter, *o) case "pattern_replace": o := NewPatternReplaceCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.CharFilter = append(s.CharFilter, *o) case "icu_normalizer": o := NewIcuNormalizationCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.CharFilter = append(s.CharFilter, *o) case "kuromoji_iteration_mark": o := NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) } s.CharFilter = 
append(s.CharFilter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter = append(s.CharFilter, *o) } @@ -144,289 +145,289 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "asciifolding": o := NewAsciiFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "asciifolding", err) } s.Filter = append(s.Filter, *o) case "common_grams": o := NewCommonGramsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "common_grams", err) } s.Filter = append(s.Filter, *o) case "condition": o := NewConditionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "condition", err) } s.Filter = append(s.Filter, *o) case "delimited_payload": o := NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "delimited_payload", err) } s.Filter = append(s.Filter, *o) case "edge_ngram": o := NewEdgeNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Filter = append(s.Filter, *o) case "elision": o := NewElisionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "elision", err) } s.Filter = append(s.Filter, *o) case "fingerprint": o := NewFingerprintTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "fingerprint", err) } s.Filter = append(s.Filter, *o) case "hunspell": o := NewHunspellTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hunspell", err) } s.Filter = append(s.Filter, *o) case "hyphenation_decompounder": o := NewHyphenationDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return 
fmt.Errorf("%s | %w", "hyphenation_decompounder", err) } s.Filter = append(s.Filter, *o) case "keep_types": o := NewKeepTypesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep_types", err) } s.Filter = append(s.Filter, *o) case "keep": o := NewKeepWordsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep", err) } s.Filter = append(s.Filter, *o) case "keyword_marker": o := NewKeywordMarkerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword_marker", err) } s.Filter = append(s.Filter, *o) case "kstem": o := NewKStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kstem", err) } s.Filter = append(s.Filter, *o) case "length": o := NewLengthTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "length", err) } s.Filter = append(s.Filter, *o) case "limit": o := NewLimitTokenCountTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "limit", err) } s.Filter = append(s.Filter, *o) case "lowercase": o := NewLowercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Filter = append(s.Filter, *o) case "multiplexer": o := NewMultiplexerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "multiplexer", err) } s.Filter = append(s.Filter, *o) case "ngram": o := NewNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Filter = append(s.Filter, *o) case "nori_part_of_speech": o := NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) } s.Filter = append(s.Filter, *o) case 
"pattern_capture": o := NewPatternCaptureTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_capture", err) } s.Filter = append(s.Filter, *o) case "pattern_replace": o := NewPatternReplaceTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.Filter = append(s.Filter, *o) case "porter_stem": o := NewPorterStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "porter_stem", err) } s.Filter = append(s.Filter, *o) case "predicate_token_filter": o := NewPredicateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "predicate_token_filter", err) } s.Filter = append(s.Filter, *o) case "remove_duplicates": o := NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "remove_duplicates", err) } s.Filter = append(s.Filter, *o) case "reverse": o := NewReverseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "reverse", err) } s.Filter = append(s.Filter, *o) case "shingle": o := NewShingleTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shingle", err) } s.Filter = append(s.Filter, *o) case "snowball": o := NewSnowballTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "snowball", err) } s.Filter = append(s.Filter, *o) case "stemmer_override": o := NewStemmerOverrideTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer_override", err) } s.Filter = append(s.Filter, *o) case "stemmer": o := NewStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer", err) } s.Filter = append(s.Filter, *o) case "stop": o := 
NewStopTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stop", err) } s.Filter = append(s.Filter, *o) case "synonym_graph": o := NewSynonymGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym_graph", err) } s.Filter = append(s.Filter, *o) case "synonym": o := NewSynonymTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym", err) } s.Filter = append(s.Filter, *o) case "trim": o := NewTrimTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "trim", err) } s.Filter = append(s.Filter, *o) case "truncate": o := NewTruncateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "truncate", err) } s.Filter = append(s.Filter, *o) case "unique": o := NewUniqueTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unique", err) } s.Filter = append(s.Filter, *o) case "uppercase": o := NewUppercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uppercase", err) } s.Filter = append(s.Filter, *o) case "word_delimiter_graph": o := NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) } s.Filter = append(s.Filter, *o) case "word_delimiter": o := NewWordDelimiterTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter", err) } s.Filter = append(s.Filter, *o) case "kuromoji_stemmer": o := NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) } s.Filter = append(s.Filter, *o) case "kuromoji_readingform": o := NewKuromojiReadingFormTokenFilter() if err := 
localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) } s.Filter = append(s.Filter, *o) case "kuromoji_part_of_speech": o := NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "icu_collation": o := NewIcuCollationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation", err) } s.Filter = append(s.Filter, *o) case "icu_folding": o := NewIcuFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_folding", err) } s.Filter = append(s.Filter, *o) case "icu_normalizer": o := NewIcuNormalizationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.Filter = append(s.Filter, *o) case "icu_transform": o := NewIcuTransformTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_transform", err) } s.Filter = append(s.Filter, *o) case "phonetic": o := NewPhoneticTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "phonetic", err) } s.Filter = append(s.Filter, *o) case "dictionary_decompounder": o := NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) } s.Filter = append(s.Filter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter = append(s.Filter, *o) } @@ -447,114 +448,114 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "char_group": o := NewCharGroupTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "char_group", err) } 
s.Tokenizer = *o case "classic": o := NewClassicTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "classic", err) } s.Tokenizer = *o case "edge_ngram": o := NewEdgeNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Tokenizer = *o case "keyword": o := NewKeywordTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Tokenizer = *o case "letter": o := NewLetterTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "letter", err) } s.Tokenizer = *o case "lowercase": o := NewLowercaseTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Tokenizer = *o case "ngram": o := NewNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Tokenizer = *o case "path_hierarchy": o := NewPathHierarchyTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "path_hierarchy", err) } s.Tokenizer = *o case "pattern": o := NewPatternTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern", err) } s.Tokenizer = *o case "simple_pattern": o := NewSimplePatternTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple_pattern", err) } s.Tokenizer = *o case "simple_pattern_split": o := NewSimplePatternSplitTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple_pattern_split", err) } s.Tokenizer = *o case "standard": o := NewStandardTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "standard", err) } s.Tokenizer = *o case "thai": o := NewThaiTokenizer() if err := localDec.Decode(&o); err 
!= nil { - return err + return fmt.Errorf("%s | %w", "thai", err) } s.Tokenizer = *o case "uax_url_email": o := NewUaxEmailUrlTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uax_url_email", err) } s.Tokenizer = *o case "whitespace": o := NewWhitespaceTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "whitespace", err) } s.Tokenizer = *o case "icu_tokenizer": o := NewIcuTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_tokenizer", err) } s.Tokenizer = *o case "kuromoji_tokenizer": o := NewKuromojiTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) } s.Tokenizer = *o case "nori_tokenizer": o := NewNoriTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_tokenizer", err) } s.Tokenizer = *o default: if err := localDec.Decode(&s.Tokenizer); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } } @@ -569,3 +570,13 @@ func NewCategorizationAnalyzerDefinition() *CategorizationAnalyzerDefinition { return r } + +// true + +type CategorizationAnalyzerDefinitionVariant interface { + CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition +} + +func (s *CategorizationAnalyzerDefinition) CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition { + return s +} diff --git a/typedapi/types/categorizetextaggregation.go b/typedapi/types/categorizetextaggregation.go index d90960d605..e6bfc9bec5 100644 --- a/typedapi/types/categorizetextaggregation.go +++ b/typedapi/types/categorizetextaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CategorizeTextAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1117-L1182 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1117-L1182 type CategorizeTextAggregation struct { // CategorizationAnalyzer The categorization analyzer specifies how the text is analyzed and tokenized // before being categorized. @@ -271,3 +271,13 @@ func NewCategorizeTextAggregation() *CategorizeTextAggregation { return r } + +// true + +type CategorizeTextAggregationVariant interface { + CategorizeTextAggregationCaster() *CategorizeTextAggregation +} + +func (s *CategorizeTextAggregation) CategorizeTextAggregationCaster() *CategorizeTextAggregation { + return s +} diff --git a/typedapi/types/categorizetextanalyzer.go b/typedapi/types/categorizetextanalyzer.go index 3817c94330..c30f3cddf5 100644 --- a/typedapi/types/categorizetextanalyzer.go +++ b/typedapi/types/categorizetextanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // CustomCategorizeTextAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1184-L1187 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1184-L1187 type CategorizeTextAnalyzer any + +type CategorizeTextAnalyzerVariant interface { + CategorizeTextAnalyzerCaster() *CategorizeTextAnalyzer +} diff --git a/typedapi/types/category.go b/typedapi/types/category.go index e97cbaffc3..5b458a5566 100644 --- a/typedapi/types/category.go +++ b/typedapi/types/category.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Category type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Category.ts#L23-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Category.ts#L23-L49 type Category struct { // CategoryId A unique identifier for the category. category_id is unique at the job level, // even when per-partition categorization is enabled. 
@@ -232,3 +232,5 @@ func NewCategory() *Category { return r } + +// false diff --git a/typedapi/types/cattrainedmodelscolumns.go b/typedapi/types/cattrainedmodelscolumns.go index a6f690f17e..98d344ce1e 100644 --- a/typedapi/types/cattrainedmodelscolumns.go +++ b/typedapi/types/cattrainedmodelscolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // CatTrainedModelsColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L636-L638 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L636-L638 type CatTrainedModelsColumns []cattrainedmodelscolumn.CatTrainedModelsColumn diff --git a/typedapi/types/cattransformcolumns.go b/typedapi/types/cattransformcolumns.go index 151fd6525d..c292089d5a 100644 --- a/typedapi/types/cattransformcolumns.go +++ b/typedapi/types/cattransformcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // CatTransformColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L845-L845 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L845-L845 type CatTransformColumns []cattransformcolumn.CatTransformColumn diff --git a/typedapi/types/ccr.go b/typedapi/types/ccr.go index b05249bc46..4d386cfbdf 100644 --- a/typedapi/types/ccr.go +++ b/typedapi/types/ccr.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Ccr type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L332-L335 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L344-L347 type Ccr struct { AutoFollowPatternsCount int `json:"auto_follow_patterns_count"` Available bool `json:"available"` @@ -125,3 +125,5 @@ func NewCcr() *Ccr { return r } + +// false diff --git a/typedapi/types/ccrshardstats.go b/typedapi/types/ccrshardstats.go index 9a3d28dbc3..9cc1425dac 100644 --- a/typedapi/types/ccrshardstats.go +++ b/typedapi/types/ccrshardstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,41 +31,79 @@ import ( // CcrShardStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/_types/FollowIndexStats.ts#L35-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/_types/FollowIndexStats.ts#L37-L109 type CcrShardStats struct { - BytesRead int64 `json:"bytes_read"` - FailedReadRequests int64 `json:"failed_read_requests"` - FailedWriteRequests int64 `json:"failed_write_requests"` - FatalException *ErrorCause `json:"fatal_exception,omitempty"` - FollowerAliasesVersion int64 `json:"follower_aliases_version"` - FollowerGlobalCheckpoint int64 `json:"follower_global_checkpoint"` - FollowerIndex string `json:"follower_index"` - FollowerMappingVersion int64 `json:"follower_mapping_version"` - FollowerMaxSeqNo int64 `json:"follower_max_seq_no"` - FollowerSettingsVersion int64 `json:"follower_settings_version"` - LastRequestedSeqNo int64 `json:"last_requested_seq_no"` - LeaderGlobalCheckpoint int64 `json:"leader_global_checkpoint"` - LeaderIndex string `json:"leader_index"` - LeaderMaxSeqNo int64 `json:"leader_max_seq_no"` - OperationsRead int64 `json:"operations_read"` - OperationsWritten int64 `json:"operations_written"` - OutstandingReadRequests int `json:"outstanding_read_requests"` - OutstandingWriteRequests int `json:"outstanding_write_requests"` - ReadExceptions []ReadException `json:"read_exceptions"` - RemoteCluster string `json:"remote_cluster"` - ShardId int `json:"shard_id"` - SuccessfulReadRequests int64 `json:"successful_read_requests"` - SuccessfulWriteRequests int64 `json:"successful_write_requests"` - TimeSinceLastRead Duration `json:"time_since_last_read,omitempty"` - TimeSinceLastReadMillis int64 `json:"time_since_last_read_millis"` - TotalReadRemoteExecTime Duration `json:"total_read_remote_exec_time,omitempty"` - TotalReadRemoteExecTimeMillis int64 `json:"total_read_remote_exec_time_millis"` - TotalReadTime Duration 
`json:"total_read_time,omitempty"` - TotalReadTimeMillis int64 `json:"total_read_time_millis"` - TotalWriteTime Duration `json:"total_write_time,omitempty"` - TotalWriteTimeMillis int64 `json:"total_write_time_millis"` - WriteBufferOperationCount int64 `json:"write_buffer_operation_count"` - WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` + // BytesRead The total of transferred bytes read from the leader. + // This is only an estimate and does not account for compression if enabled. + BytesRead int64 `json:"bytes_read"` + // FailedReadRequests The number of failed reads. + FailedReadRequests int64 `json:"failed_read_requests"` + // FailedWriteRequests The number of failed bulk write requests on the follower. + FailedWriteRequests int64 `json:"failed_write_requests"` + FatalException *ErrorCause `json:"fatal_exception,omitempty"` + // FollowerAliasesVersion The index aliases version the follower is synced up to. + FollowerAliasesVersion int64 `json:"follower_aliases_version"` + // FollowerGlobalCheckpoint The current global checkpoint on the follower. + // The difference between the `leader_global_checkpoint` and the + // `follower_global_checkpoint` is an indication of how much the follower is + // lagging the leader. + FollowerGlobalCheckpoint int64 `json:"follower_global_checkpoint"` + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // FollowerMappingVersion The mapping version the follower is synced up to. + FollowerMappingVersion int64 `json:"follower_mapping_version"` + // FollowerMaxSeqNo The current maximum sequence number on the follower. + FollowerMaxSeqNo int64 `json:"follower_max_seq_no"` + // FollowerSettingsVersion The index settings version the follower is synced up to. + FollowerSettingsVersion int64 `json:"follower_settings_version"` + // LastRequestedSeqNo The starting sequence number of the last batch of operations requested from + // the leader. 
+ LastRequestedSeqNo int64 `json:"last_requested_seq_no"` + // LeaderGlobalCheckpoint The current global checkpoint on the leader known to the follower task. + LeaderGlobalCheckpoint int64 `json:"leader_global_checkpoint"` + // LeaderIndex The name of the index in the leader cluster being followed. + LeaderIndex string `json:"leader_index"` + // LeaderMaxSeqNo The current maximum sequence number on the leader known to the follower task. + LeaderMaxSeqNo int64 `json:"leader_max_seq_no"` + // OperationsRead The total number of operations read from the leader. + OperationsRead int64 `json:"operations_read"` + // OperationsWritten The number of operations written on the follower. + OperationsWritten int64 `json:"operations_written"` + // OutstandingReadRequests The number of active read requests from the follower. + OutstandingReadRequests int `json:"outstanding_read_requests"` + // OutstandingWriteRequests The number of active bulk write requests on the follower. + OutstandingWriteRequests int `json:"outstanding_write_requests"` + // ReadExceptions An array of objects representing failed reads. + ReadExceptions []ReadException `json:"read_exceptions"` + // RemoteCluster The remote cluster containing the leader index. + RemoteCluster string `json:"remote_cluster"` + // ShardId The numerical shard ID, with values from 0 to one less than the number of + // replicas. + ShardId int `json:"shard_id"` + // SuccessfulReadRequests The number of successful fetches. + SuccessfulReadRequests int64 `json:"successful_read_requests"` + // SuccessfulWriteRequests The number of bulk write requests run on the follower. + SuccessfulWriteRequests int64 `json:"successful_write_requests"` + TimeSinceLastRead Duration `json:"time_since_last_read,omitempty"` + // TimeSinceLastReadMillis The number of milliseconds since a read request was sent to the leader. 
+ // When the follower is caught up to the leader, this number will increase up to + // the configured `read_poll_timeout` at which point another read request will + // be sent to the leader. + TimeSinceLastReadMillis int64 `json:"time_since_last_read_millis"` + TotalReadRemoteExecTime Duration `json:"total_read_remote_exec_time,omitempty"` + // TotalReadRemoteExecTimeMillis The total time reads spent running on the remote cluster. + TotalReadRemoteExecTimeMillis int64 `json:"total_read_remote_exec_time_millis"` + TotalReadTime Duration `json:"total_read_time,omitempty"` + // TotalReadTimeMillis The total time reads were outstanding, measured from the time a read was sent + // to the leader to the time a reply was returned to the follower. + TotalReadTimeMillis int64 `json:"total_read_time_millis"` + TotalWriteTime Duration `json:"total_write_time,omitempty"` + // TotalWriteTimeMillis The total time spent writing on the follower. + TotalWriteTimeMillis int64 `json:"total_write_time_millis"` + // WriteBufferOperationCount The number of write operations queued on the follower. + WriteBufferOperationCount int64 `json:"write_buffer_operation_count"` + // WriteBufferSizeInBytes The total number of bytes of operations currently queued for writing. + WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` } func (s *CcrShardStats) UnmarshalJSON(data []byte) error { @@ -413,3 +451,5 @@ func NewCcrShardStats() *CcrShardStats { return r } + +// false diff --git a/typedapi/types/certificateinformation.go b/typedapi/types/certificateinformation.go index 68b14adc9d..38d2a6301a 100644 --- a/typedapi/types/certificateinformation.go +++ b/typedapi/types/certificateinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,16 +31,28 @@ import ( // CertificateInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ssl/certificates/types.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ssl/certificates/types.ts#L22-L57 type CertificateInformation struct { - Alias *string `json:"alias,omitempty"` - Expiry DateTime `json:"expiry"` - Format string `json:"format"` - HasPrivateKey bool `json:"has_private_key"` - Issuer *string `json:"issuer,omitempty"` - Path string `json:"path"` - SerialNumber string `json:"serial_number"` - SubjectDn string `json:"subject_dn"` + // Alias If the path refers to a container file (a jks keystore, or a PKCS#12 file), + // it is the alias of the certificate. + // Otherwise, it is null. + Alias *string `json:"alias,omitempty"` + // Expiry The ISO formatted date of the certificate's expiry (not-after) date. + Expiry DateTime `json:"expiry"` + // Format The format of the file. + // Valid values include `jks`, `PKCS12`, and `PEM`. + Format string `json:"format"` + // HasPrivateKey Indicates whether Elasticsearch has access to the private key for this + // certificate. + HasPrivateKey bool `json:"has_private_key"` + // Issuer The Distinguished Name of the certificate's issuer. + Issuer *string `json:"issuer,omitempty"` + // Path The path to the certificate, as configured in the `elasticsearch.yml` file. + Path string `json:"path"` + // SerialNumber The hexadecimal representation of the certificate's serial number. + SerialNumber string `json:"serial_number"` + // SubjectDn The Distinguished Name of the certificate's subject. 
+ SubjectDn string `json:"subject_dn"` } func (s *CertificateInformation) UnmarshalJSON(data []byte) error { @@ -160,3 +172,5 @@ func NewCertificateInformation() *CertificateInformation { return r } + +// false diff --git a/typedapi/types/cgroup.go b/typedapi/types/cgroup.go index 9827b9d475..c3e3dd74d0 100644 --- a/typedapi/types/cgroup.go +++ b/typedapi/types/cgroup.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Cgroup type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L497-L510 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L497-L510 type Cgroup struct { // Cpu Contains statistics about `cpu` control group for the node. Cpu *CgroupCpu `json:"cpu,omitempty"` @@ -38,3 +38,5 @@ func NewCgroup() *Cgroup { return r } + +// false diff --git a/typedapi/types/cgroupcpu.go b/typedapi/types/cgroupcpu.go index 46444ccdf3..ce8c170717 100644 --- a/typedapi/types/cgroupcpu.go +++ b/typedapi/types/cgroupcpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CgroupCpu type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L523-L540 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L523-L540 type CgroupCpu struct { // CfsPeriodMicros The period of time, in microseconds, for how regularly all tasks in the same // cgroup as the Elasticsearch process should have their access to CPU resources @@ -122,3 +122,5 @@ func NewCgroupCpu() *CgroupCpu { return r } + +// false diff --git a/typedapi/types/cgroupcpustat.go b/typedapi/types/cgroupcpustat.go index 7b820a9e1b..245b3545e0 100644 --- a/typedapi/types/cgroupcpustat.go +++ b/typedapi/types/cgroupcpustat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CgroupCpuStat type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L542-L555 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L542-L555 type CgroupCpuStat struct { // NumberOfElapsedPeriods The number of reporting periods (as specified by `cfs_period_micros`) that // have elapsed. @@ -105,3 +105,5 @@ func NewCgroupCpuStat() *CgroupCpuStat { return r } + +// false diff --git a/typedapi/types/cgroupmemory.go b/typedapi/types/cgroupmemory.go index fc91a60da1..8151881c3c 100644 --- a/typedapi/types/cgroupmemory.go +++ b/typedapi/types/cgroupmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CgroupMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L557-L573 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L557-L573 type CgroupMemory struct { // ControlGroup The `memory` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` @@ -111,3 +111,5 @@ func NewCgroupMemory() *CgroupMemory { return r } + +// false diff --git a/typedapi/types/chaininput.go b/typedapi/types/chaininput.go index bfc6f6d8c6..58902bd278 100644 --- a/typedapi/types/chaininput.go +++ b/typedapi/types/chaininput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ChainInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L35-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L35-L37 type ChainInput struct { Inputs []map[string]WatcherInput `json:"inputs"` } @@ -33,3 +33,13 @@ func NewChainInput() *ChainInput { return r } + +// true + +type ChainInputVariant interface { + ChainInputCaster() *ChainInput +} + +func (s *ChainInput) ChainInputCaster() *ChainInput { + return s +} diff --git a/typedapi/types/charfilter.go b/typedapi/types/charfilter.go index 25ff8979be..275b8efae8 100644 --- a/typedapi/types/charfilter.go +++ b/typedapi/types/charfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // CharFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/char_filters.ts#L28-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/char_filters.ts#L28-L33 type CharFilter any + +type CharFilterVariant interface { + CharFilterCaster() *CharFilter +} diff --git a/typedapi/types/charfilterdefinition.go b/typedapi/types/charfilterdefinition.go index 0683f1707f..7510b5b48c 100644 --- a/typedapi/types/charfilterdefinition.go +++ b/typedapi/types/charfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,5 +28,9 @@ package types // IcuNormalizationCharFilter // KuromojiIterationMarkCharFilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/char_filters.ts#L35-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/char_filters.ts#L35-L44 type CharFilterDefinition any + +type CharFilterDefinitionVariant interface { + CharFilterDefinitionCaster() *CharFilterDefinition +} diff --git a/typedapi/types/charfilterdetail.go b/typedapi/types/charfilterdetail.go index 41e72d39d9..4f9e45dd04 100644 --- a/typedapi/types/charfilterdetail.go +++ b/typedapi/types/charfilterdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CharFilterDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L46-L49 type CharFilterDetail struct { FilteredText []string `json:"filtered_text"` Name string `json:"name"` @@ -80,3 +80,5 @@ func NewCharFilterDetail() *CharFilterDetail { return r } + +// false diff --git a/typedapi/types/charfiltertypes.go b/typedapi/types/charfiltertypes.go index 79cf28592e..f96f7525fe 100644 --- a/typedapi/types/charfiltertypes.go +++ b/typedapi/types/charfiltertypes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // CharFilterTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L228-L261 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L228-L261 type CharFilterTypes struct { // AnalyzerTypes Contains statistics about analyzer types used in selected nodes. AnalyzerTypes []FieldTypes `json:"analyzer_types"` @@ -48,3 +48,5 @@ func NewCharFilterTypes() *CharFilterTypes { return r } + +// false diff --git a/typedapi/types/chargrouptokenizer.go b/typedapi/types/chargrouptokenizer.go index 642e9e333e..4e5cff8284 100644 --- a/typedapi/types/chargrouptokenizer.go +++ b/typedapi/types/chargrouptokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CharGroupTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L31-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L31-L38 type CharGroupTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` TokenizeOnChars []string `json:"tokenize_on_chars"` @@ -111,3 +111,13 @@ func NewCharGroupTokenizer() *CharGroupTokenizer { return r } + +// true + +type CharGroupTokenizerVariant interface { + CharGroupTokenizerCaster() *CharGroupTokenizer +} + +func (s *CharGroupTokenizer) CharGroupTokenizerCaster() *CharGroupTokenizer { + return s +} diff --git a/typedapi/types/checkpointing.go b/typedapi/types/checkpointing.go index eb391c12c3..80405d1da1 100644 --- a/typedapi/types/checkpointing.go +++ b/typedapi/types/checkpointing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Checkpointing type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L85-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L85-L92 type Checkpointing struct { ChangesLastDetectedAt *int64 `json:"changes_last_detected_at,omitempty"` ChangesLastDetectedAtDateTime DateTime `json:"changes_last_detected_at_date_time,omitempty"` @@ -127,3 +127,5 @@ func NewCheckpointing() *Checkpointing { return r } + +// false diff --git a/typedapi/types/checkpointstats.go b/typedapi/types/checkpointstats.go index 5a411ec230..3f65b09936 100644 --- a/typedapi/types/checkpointstats.go +++ b/typedapi/types/checkpointstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CheckpointStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L76-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L76-L83 type CheckpointStats struct { Checkpoint int64 `json:"checkpoint"` CheckpointProgress *TransformProgress `json:"checkpoint_progress,omitempty"` @@ -107,3 +107,5 @@ func NewCheckpointStats() *CheckpointStats { return r } + +// false diff --git a/typedapi/types/childrenaggregate.go b/typedapi/types/childrenaggregate.go index 47e4302475..0e201d5694 100644 --- a/typedapi/types/childrenaggregate.go +++ b/typedapi/types/childrenaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ChildrenAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L888-L892 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L888-L892 type ChildrenAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s ChildrenAggregate) MarshalJSON() ([]byte, error) { // NewChildrenAggregate returns a ChildrenAggregate. 
func NewChildrenAggregate() *ChildrenAggregate { r := &ChildrenAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/childrenaggregation.go b/typedapi/types/childrenaggregation.go index cdb106c707..e28578918e 100644 --- a/typedapi/types/childrenaggregation.go +++ b/typedapi/types/childrenaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ChildrenAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L121-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L121-L126 type ChildrenAggregation struct { // Type The child type that should be selected. Type *string `json:"type,omitempty"` @@ -67,3 +67,13 @@ func NewChildrenAggregation() *ChildrenAggregation { return r } + +// true + +type ChildrenAggregationVariant interface { + ChildrenAggregationCaster() *ChildrenAggregation +} + +func (s *ChildrenAggregation) ChildrenAggregationCaster() *ChildrenAggregation { + return s +} diff --git a/typedapi/types/chineseanalyzer.go b/typedapi/types/chineseanalyzer.go index 66f07ca514..2097204db8 100644 --- a/typedapi/types/chineseanalyzer.go +++ b/typedapi/types/chineseanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ChineseAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L109-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L120-L124 type ChineseAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewChineseAnalyzer() *ChineseAnalyzer { return r } + +// true + +type ChineseAnalyzerVariant interface { + ChineseAnalyzerCaster() *ChineseAnalyzer +} + +func (s *ChineseAnalyzer) ChineseAnalyzerCaster() *ChineseAnalyzer { + return s +} diff --git a/typedapi/types/chisquareheuristic.go b/typedapi/types/chisquareheuristic.go index 435a014d57..58c3521e30 100644 --- a/typedapi/types/chisquareheuristic.go +++ b/typedapi/types/chisquareheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ChiSquareHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L782-L791 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L782-L791 type ChiSquareHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. @@ -95,3 +95,13 @@ func NewChiSquareHeuristic() *ChiSquareHeuristic { return r } + +// true + +type ChiSquareHeuristicVariant interface { + ChiSquareHeuristicCaster() *ChiSquareHeuristic +} + +func (s *ChiSquareHeuristic) ChiSquareHeuristicCaster() *ChiSquareHeuristic { + return s +} diff --git a/typedapi/types/chunkingconfig.go b/typedapi/types/chunkingconfig.go index 09449da6b3..b67fa91ed6 100644 --- a/typedapi/types/chunkingconfig.go +++ b/typedapi/types/chunkingconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ChunkingConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L241-L254 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L251-L264 type ChunkingConfig struct { // Mode If the mode is `auto`, the chunk size is dynamically calculated; // this is the recommended value when the datafeed does not use aggregations. 
@@ -82,3 +82,13 @@ func NewChunkingConfig() *ChunkingConfig { return r } + +// true + +type ChunkingConfigVariant interface { + ChunkingConfigCaster() *ChunkingConfig +} + +func (s *ChunkingConfig) ChunkingConfigCaster() *ChunkingConfig { + return s +} diff --git a/typedapi/types/circleprocessor.go b/typedapi/types/circleprocessor.go index ca6f31f68a..7c4fbd2f28 100644 --- a/typedapi/types/circleprocessor.go +++ b/typedapi/types/circleprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CircleProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L533-L556 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L574-L597 type CircleProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -46,7 +46,7 @@ type CircleProcessor struct { // for GeoJSON. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -114,16 +114,9 @@ func (s *CircleProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -191,3 +184,13 @@ func NewCircleProcessor() *CircleProcessor { return r } + +// true + +type CircleProcessorVariant interface { + CircleProcessorCaster() *CircleProcessor +} + +func (s *CircleProcessor) CircleProcessorCaster() *CircleProcessor { + return s +} diff --git a/typedapi/types/cjkanalyzer.go b/typedapi/types/cjkanalyzer.go index 6b40237520..d12d8d9068 100644 --- a/typedapi/types/cjkanalyzer.go +++ b/typedapi/types/cjkanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CjkAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L115-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L126-L130 type CjkAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewCjkAnalyzer() *CjkAnalyzer { return r } + +// true + +type CjkAnalyzerVariant interface { + CjkAnalyzerCaster() *CjkAnalyzer +} + +func (s *CjkAnalyzer) CjkAnalyzerCaster() *CjkAnalyzer { + return s +} diff --git a/typedapi/types/classictokenizer.go b/typedapi/types/classictokenizer.go index 81438f735d..2a9f8a7f16 100644 --- a/typedapi/types/classictokenizer.go +++ b/typedapi/types/classictokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClassicTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L40-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L40-L46 type ClassicTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewClassicTokenizer() *ClassicTokenizer { return r } + +// true + +type ClassicTokenizerVariant interface { + ClassicTokenizerCaster() *ClassicTokenizer +} + +func (s *ClassicTokenizer) ClassicTokenizerCaster() *ClassicTokenizer { + return s +} diff --git a/typedapi/types/classificationinferenceoptions.go b/typedapi/types/classificationinferenceoptions.go index 350d1bce5c..9cb4309430 100644 --- a/typedapi/types/classificationinferenceoptions.go +++ b/typedapi/types/classificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClassificationInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L93-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L93-L108 type ClassificationInferenceOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. 
NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -143,3 +143,13 @@ func NewClassificationInferenceOptions() *ClassificationInferenceOptions { return r } + +// true + +type ClassificationInferenceOptionsVariant interface { + ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions +} + +func (s *ClassificationInferenceOptions) ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/cleanuprepositoryresults.go b/typedapi/types/cleanuprepositoryresults.go index df5a0dc18e..fefe3939a4 100644 --- a/typedapi/types/cleanuprepositoryresults.go +++ b/typedapi/types/cleanuprepositoryresults.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,14 @@ import ( // CleanupRepositoryResults type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L37 type CleanupRepositoryResults struct { - // DeletedBlobs Number of binary large objects (blobs) removed during cleanup. + // DeletedBlobs The number of binary large objects (blobs) removed from the snapshot + // repository during cleanup operations. + // A non-zero value indicates that unreferenced blobs were found and + // subsequently cleaned up. DeletedBlobs int64 `json:"deleted_blobs"` - // DeletedBytes Number of bytes freed by cleanup operations. + // DeletedBytes The number of bytes freed by cleanup operations. 
DeletedBytes int64 `json:"deleted_bytes"` } @@ -95,3 +98,5 @@ func NewCleanupRepositoryResults() *CleanupRepositoryResults { return r } + +// false diff --git a/typedapi/types/client.go b/typedapi/types/client.go index be75395ca8..1be1a8352a 100644 --- a/typedapi/types/client.go +++ b/typedapi/types/client.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Client type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L720-L767 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L720-L767 type Client struct { // Agent Reported agent for the HTTP client. // If unavailable, this property is not included in the response. @@ -235,3 +235,5 @@ func NewClient() *Client { return r } + +// false diff --git a/typedapi/types/closeindexresult.go b/typedapi/types/closeindexresult.go index 70859bdaeb..30c94198a7 100644 --- a/typedapi/types/closeindexresult.go +++ b/typedapi/types/closeindexresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CloseIndexResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/close/CloseIndexResponse.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/close/CloseIndexResponse.ts#L32-L35 type CloseIndexResult struct { Closed bool `json:"closed"` Shards map[string]CloseShardResult `json:"shards,omitempty"` @@ -82,8 +82,10 @@ func (s *CloseIndexResult) UnmarshalJSON(data []byte) error { // NewCloseIndexResult returns a CloseIndexResult. func NewCloseIndexResult() *CloseIndexResult { r := &CloseIndexResult{ - Shards: make(map[string]CloseShardResult, 0), + Shards: make(map[string]CloseShardResult), } return r } + +// false diff --git a/typedapi/types/closeshardresult.go b/typedapi/types/closeshardresult.go index 8d2ccea972..9065f53e9f 100644 --- a/typedapi/types/closeshardresult.go +++ b/typedapi/types/closeshardresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // CloseShardResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/close/CloseIndexResponse.ts#L37-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/close/CloseIndexResponse.ts#L37-L39 type CloseShardResult struct { Failures []ShardFailure `json:"failures"` } @@ -33,3 +33,5 @@ func NewCloseShardResult() *CloseShardResult { return r } + +// false diff --git a/typedapi/types/clusterappliedstats.go b/typedapi/types/clusterappliedstats.go index 8cca051019..a757a4c8c5 100644 --- a/typedapi/types/clusterappliedstats.go +++ b/typedapi/types/clusterappliedstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterAppliedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L221-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L221-L223 type ClusterAppliedStats struct { Recordings []Recording `json:"recordings,omitempty"` } @@ -33,3 +33,5 @@ func NewClusterAppliedStats() *ClusterAppliedStats { return r } + +// false diff --git a/typedapi/types/clustercomponenttemplate.go b/typedapi/types/clustercomponenttemplate.go index c9a43daff4..7fdb8c3553 100644 --- a/typedapi/types/clustercomponenttemplate.go +++ b/typedapi/types/clustercomponenttemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ClusterComponentTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/_types/ComponentTemplate.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/_types/ComponentTemplate.ts#L27-L30 type ClusterComponentTemplate struct { ComponentTemplate ComponentTemplateNode `json:"component_template"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewClusterComponentTemplate() *ClusterComponentTemplate { return r } + +// false diff --git a/typedapi/types/clusterdetails.go b/typedapi/types/clusterdetails.go index 0648b83ede..da376bc4bb 100644 --- a/typedapi/types/clusterdetails.go +++ b/typedapi/types/clusterdetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ClusterDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L45-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L45-L52 type ClusterDetails struct { Failures []ShardFailure `json:"failures,omitempty"` Indices string `json:"indices"` @@ -115,3 +115,5 @@ func NewClusterDetails() *ClusterDetails { return r } + +// false diff --git a/typedapi/types/clusterfilesystem.go b/typedapi/types/clusterfilesystem.go index 9a9440b71b..699aa471a2 100644 --- a/typedapi/types/clusterfilesystem.go +++ b/typedapi/types/clusterfilesystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterFileSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L34-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L34-L49 type ClusterFileSystem struct { // AvailableInBytes Total number of bytes available to JVM in file stores across all selected // nodes. @@ -117,3 +117,5 @@ func NewClusterFileSystem() *ClusterFileSystem { return r } + +// false diff --git a/typedapi/types/clusterindexingpressure.go b/typedapi/types/clusterindexingpressure.go index d57ed4ad4e..5557ae97ee 100644 --- a/typedapi/types/clusterindexingpressure.go +++ b/typedapi/types/clusterindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L570-L572 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L570-L572 type ClusterIndexingPressure struct { Memory ClusterPressureMemory `json:"memory"` } @@ -33,3 +33,5 @@ func NewClusterIndexingPressure() *ClusterIndexingPressure { return r } + +// false diff --git a/typedapi/types/clusterindices.go b/typedapi/types/clusterindices.go index 55c335a9c5..4419be5617 100644 --- a/typedapi/types/clusterindices.go +++ b/typedapi/types/clusterindices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterIndices type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L74-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L74-L107 type ClusterIndices struct { // Analysis Contains statistics about analyzers and analyzer components used in selected // nodes. 
@@ -150,3 +150,5 @@ func NewClusterIndices() *ClusterIndices { return r } + +// false diff --git a/typedapi/types/clusterindicesshards.go b/typedapi/types/clusterindicesshards.go index 9915723ed4..418a5a26fa 100644 --- a/typedapi/types/clusterindicesshards.go +++ b/typedapi/types/clusterindicesshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterIndicesShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L60-L72 type ClusterIndicesShards struct { // Index Contains statistics about shards assigned to selected nodes. Index *ClusterIndicesShardsIndex `json:"index,omitempty"` @@ -122,3 +122,5 @@ func NewClusterIndicesShards() *ClusterIndicesShards { return r } + +// false diff --git a/typedapi/types/clusterindicesshardsindex.go b/typedapi/types/clusterindicesshardsindex.go index 67db87987e..752fd4d770 100644 --- a/typedapi/types/clusterindicesshardsindex.go +++ b/typedapi/types/clusterindicesshardsindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterIndicesShardsIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L51-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L51-L58 type ClusterIndicesShardsIndex struct { // Primaries Contains statistics about the number of primary shards assigned to selected // nodes. @@ -40,3 +40,5 @@ func NewClusterIndicesShardsIndex() *ClusterIndicesShardsIndex { return r } + +// false diff --git a/typedapi/types/clusterinfo.go b/typedapi/types/clusterinfo.go index 026f6b8267..21a218fe17 100644 --- a/typedapi/types/clusterinfo.go +++ b/typedapi/types/clusterinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L49-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L49-L55 type ClusterInfo struct { Nodes map[string]NodeDiskUsage `json:"nodes"` ReservedSizes []ReservedSize `json:"reserved_sizes"` @@ -34,11 +34,13 @@ type ClusterInfo struct { // NewClusterInfo returns a ClusterInfo. 
func NewClusterInfo() *ClusterInfo { r := &ClusterInfo{ - Nodes: make(map[string]NodeDiskUsage, 0), - ShardDataSetSizes: make(map[string]string, 0), - ShardPaths: make(map[string]string, 0), - ShardSizes: make(map[string]int64, 0), + Nodes: make(map[string]NodeDiskUsage), + ShardDataSetSizes: make(map[string]string), + ShardPaths: make(map[string]string), + ShardSizes: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/clusterinfotargets.go b/typedapi/types/clusterinfotargets.go index d76a3a4414..56019aeb8a 100644 --- a/typedapi/types/clusterinfotargets.go +++ b/typedapi/types/clusterinfotargets.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // ClusterInfoTargets type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L390-L390 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L386-L386 type ClusterInfoTargets []clusterinfotarget.ClusterInfoTarget diff --git a/typedapi/types/clusteringest.go b/typedapi/types/clusteringest.go index 8be9c56590..a05b83f8d8 100644 --- a/typedapi/types/clusteringest.go +++ b/typedapi/types/clusteringest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L270-L273 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L270-L273 type ClusterIngest struct { NumberOfPipelines int `json:"number_of_pipelines"` ProcessorStats map[string]ClusterProcessor `json:"processor_stats"` @@ -84,8 +84,10 @@ func (s *ClusterIngest) UnmarshalJSON(data []byte) error { // NewClusterIngest returns a ClusterIngest. func NewClusterIngest() *ClusterIngest { r := &ClusterIngest{ - ProcessorStats: make(map[string]ClusterProcessor, 0), + ProcessorStats: make(map[string]ClusterProcessor), } return r } + +// false diff --git a/typedapi/types/clusterjvm.go b/typedapi/types/clusterjvm.go index 4ec174444c..8ff5bec8b7 100644 --- a/typedapi/types/clusterjvm.go +++ b/typedapi/types/clusterjvm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterJvm type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L275-L292 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L275-L292 type ClusterJvm struct { // MaxUptimeInMillis Uptime duration, in milliseconds, since JVM last started. 
MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` @@ -99,3 +99,5 @@ func NewClusterJvm() *ClusterJvm { return r } + +// false diff --git a/typedapi/types/clusterjvmmemory.go b/typedapi/types/clusterjvmmemory.go index 5fd49fcb31..e7882fd00d 100644 --- a/typedapi/types/clusterjvmmemory.go +++ b/typedapi/types/clusterjvmmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterJvmMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L294-L303 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L294-L303 type ClusterJvmMemory struct { // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap across all // selected nodes. @@ -96,3 +96,5 @@ func NewClusterJvmMemory() *ClusterJvmMemory { return r } + +// false diff --git a/typedapi/types/clusterjvmversion.go b/typedapi/types/clusterjvmversion.go index 2caaeae16c..746278dd91 100644 --- a/typedapi/types/clusterjvmversion.go +++ b/typedapi/types/clusterjvmversion.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterJvmVersion type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L305-L335 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L305-L335 type ClusterJvmVersion struct { // BundledJdk Always `true`. All distributions come with a bundled Java Development Kit // (JDK). @@ -156,3 +156,5 @@ func NewClusterJvmVersion() *ClusterJvmVersion { return r } + +// false diff --git a/typedapi/types/clusternetworktypes.go b/typedapi/types/clusternetworktypes.go index b9522649d3..9b638046c2 100644 --- a/typedapi/types/clusternetworktypes.go +++ b/typedapi/types/clusternetworktypes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterNetworkTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L337-L346 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L337-L346 type ClusterNetworkTypes struct { // HttpTypes Contains statistics about the HTTP network types used by selected nodes. HttpTypes map[string]int `json:"http_types"` @@ -33,9 +33,11 @@ type ClusterNetworkTypes struct { // NewClusterNetworkTypes returns a ClusterNetworkTypes. 
func NewClusterNetworkTypes() *ClusterNetworkTypes { r := &ClusterNetworkTypes{ - HttpTypes: make(map[string]int, 0), - TransportTypes: make(map[string]int, 0), + HttpTypes: make(map[string]int), + TransportTypes: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/clusternode.go b/typedapi/types/clusternode.go index e1ba859422..94761c51d3 100644 --- a/typedapi/types/clusternode.go +++ b/typedapi/types/clusternode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ClusterNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/ClusterNode.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/ClusterNode.ts#L22-L24 type ClusterNode struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewClusterNode() *ClusterNode { return r } + +// false diff --git a/typedapi/types/clusternodecount.go b/typedapi/types/clusternodecount.go index f0b219af5d..35051f7295 100644 --- a/typedapi/types/clusternodecount.go +++ b/typedapi/types/clusternodecount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterNodeCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L348-L367 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L348-L367 type ClusterNodeCount struct { CoordinatingOnly int `json:"coordinating_only"` Data int `json:"data"` @@ -299,3 +299,5 @@ func NewClusterNodeCount() *ClusterNodeCount { return r } + +// false diff --git a/typedapi/types/clusternodes.go b/typedapi/types/clusternodes.go index cf15837a00..d610af305c 100644 --- a/typedapi/types/clusternodes.go +++ b/typedapi/types/clusternodes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterNodes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L369-L402 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L369-L402 type ClusterNodes struct { // Count Contains counts for nodes selected by the request’s node filters. Count ClusterNodeCount `json:"count"` @@ -55,8 +55,10 @@ type ClusterNodes struct { // NewClusterNodes returns a ClusterNodes. 
func NewClusterNodes() *ClusterNodes { r := &ClusterNodes{ - DiscoveryTypes: make(map[string]int, 0), + DiscoveryTypes: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/clusteroperatingsystem.go b/typedapi/types/clusteroperatingsystem.go index 630a9d4fa8..b301bccdf3 100644 --- a/typedapi/types/clusteroperatingsystem.go +++ b/typedapi/types/clusteroperatingsystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L415-L442 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L415-L442 type ClusterOperatingSystem struct { // AllocatedProcessors Number of processors used to calculate thread pool size across all selected // nodes. @@ -130,3 +130,5 @@ func NewClusterOperatingSystem() *ClusterOperatingSystem { return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemarchitecture.go b/typedapi/types/clusteroperatingsystemarchitecture.go index 14d9f4783d..ccc2a8783b 100644 --- a/typedapi/types/clusteroperatingsystemarchitecture.go +++ b/typedapi/types/clusteroperatingsystemarchitecture.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemArchitecture type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L404-L413 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L404-L413 type ClusterOperatingSystemArchitecture struct { // Arch Name of an architecture used by one or more selected nodes. Arch string `json:"arch"` @@ -93,3 +93,5 @@ func NewClusterOperatingSystemArchitecture() *ClusterOperatingSystemArchitecture return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemname.go b/typedapi/types/clusteroperatingsystemname.go index 26e16116f1..104e553a5b 100644 --- a/typedapi/types/clusteroperatingsystemname.go +++ b/typedapi/types/clusteroperatingsystemname.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemName type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L444-L453 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L444-L453 type ClusterOperatingSystemName struct { // Count Number of selected nodes using the operating system. 
Count int `json:"count"` @@ -86,3 +86,5 @@ func NewClusterOperatingSystemName() *ClusterOperatingSystemName { return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemprettyname.go b/typedapi/types/clusteroperatingsystemprettyname.go index 25fe2cb713..90fb37aa91 100644 --- a/typedapi/types/clusteroperatingsystemprettyname.go +++ b/typedapi/types/clusteroperatingsystemprettyname.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemPrettyName type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L455-L464 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L455-L464 type ClusterOperatingSystemPrettyName struct { // Count Number of selected nodes using the operating system. Count int `json:"count"` @@ -87,3 +87,5 @@ func NewClusterOperatingSystemPrettyName() *ClusterOperatingSystemPrettyName { return r } + +// false diff --git a/typedapi/types/clusterpressurememory.go b/typedapi/types/clusterpressurememory.go index 829e4795bb..471f2480e6 100644 --- a/typedapi/types/clusterpressurememory.go +++ b/typedapi/types/clusterpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterPressureMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L574-L578 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L574-L578 type ClusterPressureMemory struct { Current IndexingPressureMemorySummary `json:"current"` LimitInBytes int64 `json:"limit_in_bytes"` @@ -89,3 +89,5 @@ func NewClusterPressureMemory() *ClusterPressureMemory { return r } + +// false diff --git a/typedapi/types/clusterprocess.go b/typedapi/types/clusterprocess.go index 9d4caac5ca..6d4410b924 100644 --- a/typedapi/types/clusterprocess.go +++ b/typedapi/types/clusterprocess.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ClusterProcess type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L466-L475 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L466-L475 type ClusterProcess struct { // Cpu Contains statistics about CPU used by selected nodes. Cpu ClusterProcessCpu `json:"cpu"` @@ -36,3 +36,5 @@ func NewClusterProcess() *ClusterProcess { return r } + +// false diff --git a/typedapi/types/clusterprocesscpu.go b/typedapi/types/clusterprocesscpu.go index 68f79f430f..efb67d093d 100644 --- a/typedapi/types/clusterprocesscpu.go +++ b/typedapi/types/clusterprocesscpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterProcessCpu type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L477-L483 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L477-L483 type ClusterProcessCpu struct { // Percent Percentage of CPU used across all selected nodes. // Returns `-1` if not supported. @@ -80,3 +80,5 @@ func NewClusterProcessCpu() *ClusterProcessCpu { return r } + +// false diff --git a/typedapi/types/clusterprocessopenfiledescriptors.go b/typedapi/types/clusterprocessopenfiledescriptors.go index c1c8d53d83..e7f4c142bb 100644 --- a/typedapi/types/clusterprocessopenfiledescriptors.go +++ b/typedapi/types/clusterprocessopenfiledescriptors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterProcessOpenFileDescriptors type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L485-L501 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L485-L501 type ClusterProcessOpenFileDescriptors struct { // Avg Average number of concurrently open file descriptors. // Returns `-1` if not supported. 
@@ -117,3 +117,5 @@ func NewClusterProcessOpenFileDescriptors() *ClusterProcessOpenFileDescriptors { return r } + +// false diff --git a/typedapi/types/clusterprocessor.go b/typedapi/types/clusterprocessor.go index aed0bac7a4..695b95d32d 100644 --- a/typedapi/types/clusterprocessor.go +++ b/typedapi/types/clusterprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L503-L509 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L503-L509 type ClusterProcessor struct { Count int64 `json:"count"` Current int64 `json:"current"` @@ -121,3 +121,5 @@ func NewClusterProcessor() *ClusterProcessor { return r } + +// false diff --git a/typedapi/types/clusterremoteinfo.go b/typedapi/types/clusterremoteinfo.go index 9e1366becc..aeb95f5dc8 100644 --- a/typedapi/types/clusterremoteinfo.go +++ b/typedapi/types/clusterremoteinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // ClusterRemoteSniffInfo // ClusterRemoteProxyInfo // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30 type ClusterRemoteInfo any diff --git a/typedapi/types/clusterremoteproxyinfo.go b/typedapi/types/clusterremoteproxyinfo.go index 26a7a25f07..c712ddc99d 100644 --- a/typedapi/types/clusterremoteproxyinfo.go +++ b/typedapi/types/clusterremoteproxyinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,16 +31,35 @@ import ( // ClusterRemoteProxyInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L42-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L58-L83 type ClusterRemoteProxyInfo struct { - Connected bool `json:"connected"` - InitialConnectTimeout Duration `json:"initial_connect_timeout"` - MaxProxySocketConnections int `json:"max_proxy_socket_connections"` - Mode string `json:"mode,omitempty"` - NumProxySocketsConnected int `json:"num_proxy_sockets_connected"` - ProxyAddress string `json:"proxy_address"` - ServerName string `json:"server_name"` - SkipUnavailable bool `json:"skip_unavailable"` + // ClusterCredentials This field is present and has a value of `::es_redacted::` only when the + // remote cluster is configured with the API key based model. Otherwise, the + // field is not present. + ClusterCredentials *string `json:"cluster_credentials,omitempty"` + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. + InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxProxySocketConnections The maximum number of socket connections to the remote cluster when proxy + // mode is configured. + MaxProxySocketConnections int `json:"max_proxy_socket_connections"` + // Mode The connection mode for the remote cluster. 
+ Mode string `json:"mode,omitempty"` + // NumProxySocketsConnected The number of open socket connections to the remote cluster when proxy mode + // is configured. + NumProxySocketsConnected int `json:"num_proxy_sockets_connected"` + // ProxyAddress The address for remote connections when proxy mode is configured. + ProxyAddress string `json:"proxy_address"` + ServerName string `json:"server_name"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` } func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { @@ -58,6 +77,18 @@ func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { switch t { + case "cluster_credentials": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClusterCredentials", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterCredentials = &o + case "connected": var tmp any dec.Decode(&tmp) @@ -161,6 +192,7 @@ func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { func (s ClusterRemoteProxyInfo) MarshalJSON() ([]byte, error) { type innerClusterRemoteProxyInfo ClusterRemoteProxyInfo tmp := innerClusterRemoteProxyInfo{ + ClusterCredentials: s.ClusterCredentials, Connected: s.Connected, InitialConnectTimeout: s.InitialConnectTimeout, MaxProxySocketConnections: s.MaxProxySocketConnections, @@ -182,3 +214,5 @@ func NewClusterRemoteProxyInfo() *ClusterRemoteProxyInfo { return r } + +// false diff --git a/typedapi/types/clusterremotesniffinfo.go b/typedapi/types/clusterremotesniffinfo.go index e36b8e2295..d1a4275c9d 100644 --- a/typedapi/types/clusterremotesniffinfo.go +++ b/typedapi/types/clusterremotesniffinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,31 @@ import ( // ClusterRemoteSniffInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L32-L56 type ClusterRemoteSniffInfo struct { - Connected bool `json:"connected"` - InitialConnectTimeout Duration `json:"initial_connect_timeout"` - MaxConnectionsPerCluster int `json:"max_connections_per_cluster"` - Mode string `json:"mode,omitempty"` - NumNodesConnected int64 `json:"num_nodes_connected"` - Seeds []string `json:"seeds"` - SkipUnavailable bool `json:"skip_unavailable"` + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. + InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxConnectionsPerCluster The maximum number of connections maintained for the remote cluster when + // sniff mode is configured. + MaxConnectionsPerCluster int `json:"max_connections_per_cluster"` + // Mode The connection mode for the remote cluster. + Mode string `json:"mode,omitempty"` + // NumNodesConnected The number of connected nodes in the remote cluster when sniff mode is + // configured. 
+ NumNodesConnected int64 `json:"num_nodes_connected"` + // Seeds The initial seed transport addresses of the remote cluster when sniff mode is + // configured. + Seeds []string `json:"seeds"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` } func (s *ClusterRemoteSniffInfo) UnmarshalJSON(data []byte) error { @@ -160,3 +176,5 @@ func NewClusterRemoteSniffInfo() *ClusterRemoteSniffInfo { return r } + +// false diff --git a/typedapi/types/clusterruntimefieldtypes.go b/typedapi/types/clusterruntimefieldtypes.go index b02a4729bb..b7a5db5281 100644 --- a/typedapi/types/clusterruntimefieldtypes.go +++ b/typedapi/types/clusterruntimefieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterRuntimeFieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L169-L226 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L169-L226 type ClusterRuntimeFieldTypes struct { // CharsMax Maximum number of characters for a single runtime field script. 
CharsMax int `json:"chars_max"` @@ -296,3 +296,5 @@ func NewClusterRuntimeFieldTypes() *ClusterRuntimeFieldTypes { return r } + +// false diff --git a/typedapi/types/clustershardmetrics.go b/typedapi/types/clustershardmetrics.go index 489bc39010..321d4af455 100644 --- a/typedapi/types/clustershardmetrics.go +++ b/typedapi/types/clustershardmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterShardMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L511-L524 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L511-L524 type ClusterShardMetrics struct { // Avg Mean number of shards in an index, counting only shards assigned to selected // nodes. @@ -118,3 +118,5 @@ func NewClusterShardMetrics() *ClusterShardMetrics { return r } + +// false diff --git a/typedapi/types/clusterstatequeue.go b/typedapi/types/clusterstatequeue.go index b92c8149df..0ca42fa24d 100644 --- a/typedapi/types/clusterstatequeue.go +++ b/typedapi/types/clusterstatequeue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterStateQueue type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L248-L261 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L248-L261 type ClusterStateQueue struct { // Committed Number of committed cluster states in queue. Committed *int64 `json:"committed,omitempty"` @@ -112,3 +112,5 @@ func NewClusterStateQueue() *ClusterStateQueue { return r } + +// false diff --git a/typedapi/types/clusterstateupdate.go b/typedapi/types/clusterstateupdate.go index 8005433e85..6da6cb1ff9 100644 --- a/typedapi/types/clusterstateupdate.go +++ b/typedapi/types/clusterstateupdate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterStateUpdate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L278-L343 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L278-L343 type ClusterStateUpdate struct { // CommitTime The cumulative amount of time spent waiting for a successful cluster state // update to commit, which measures the time from the start of each publication @@ -216,3 +216,5 @@ func NewClusterStateUpdate() *ClusterStateUpdate { return r } + +// false diff --git a/typedapi/types/clusterstatistics.go b/typedapi/types/clusterstatistics.go index 91a901b06c..9cbba51161 100644 --- a/typedapi/types/clusterstatistics.go +++ b/typedapi/types/clusterstatistics.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ClusterStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L27-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L27-L35 type ClusterStatistics struct { Details map[string]ClusterDetails `json:"details,omitempty"` Failed int `json:"failed"` @@ -169,8 +169,10 @@ func (s *ClusterStatistics) UnmarshalJSON(data []byte) error { // NewClusterStatistics returns a ClusterStatistics. func NewClusterStatistics() *ClusterStatistics { r := &ClusterStatistics{ - Details: make(map[string]ClusterDetails, 0), + Details: make(map[string]ClusterDetails), } return r } + +// false diff --git a/typedapi/types/collector.go b/typedapi/types/collector.go index 604a8a2edd..5a3c6df7c7 100644 --- a/typedapi/types/collector.go +++ b/typedapi/types/collector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Collector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L94-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L94-L99 type Collector struct { Children []Collector `json:"children,omitempty"` Name string `json:"name"` @@ -99,3 +99,5 @@ func NewCollector() *Collector { return r } + +// false diff --git a/typedapi/types/column.go b/typedapi/types/column.go index 44ffc35108..6b023c80ba 100644 --- a/typedapi/types/column.go +++ b/typedapi/types/column.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Column type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/_types/types.ts#L23-L26 type Column struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewColumn() *Column { return r } + +// false diff --git a/typedapi/types/combinedfieldsquery.go b/typedapi/types/combinedfieldsquery.go index f7035b5bbf..e0298e1726 100644 --- a/typedapi/types/combinedfieldsquery.go +++ b/typedapi/types/combinedfieldsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // CombinedFieldsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L465-L499 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L472-L506 type CombinedFieldsQuery struct { // AutoGenerateSynonymsPhraseQuery If true, match phrase queries are automatically created for multi-term // synonyms. @@ -162,3 +162,13 @@ func NewCombinedFieldsQuery() *CombinedFieldsQuery { return r } + +// true + +type CombinedFieldsQueryVariant interface { + CombinedFieldsQueryCaster() *CombinedFieldsQuery +} + +func (s *CombinedFieldsQuery) CombinedFieldsQueryCaster() *CombinedFieldsQuery { + return s +} diff --git a/typedapi/types/command.go b/typedapi/types/command.go index 1538e590b1..2bc772e44a 100644 --- a/typedapi/types/command.go +++ b/typedapi/types/command.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Command type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L22-L43 type Command struct { // AllocateEmptyPrimary Allocate an empty primary shard to a node. 
Accepts the index and shard for // index name and shard number, and node to allocate the shard to. Using this @@ -66,3 +66,13 @@ func NewCommand() *Command { return r } + +// true + +type CommandVariant interface { + CommandCaster() *Command +} + +func (s *Command) CommandCaster() *Command { + return s +} diff --git a/typedapi/types/commandallocateprimaryaction.go b/typedapi/types/commandallocateprimaryaction.go index 1ff168a6e7..630fcd5073 100644 --- a/typedapi/types/commandallocateprimaryaction.go +++ b/typedapi/types/commandallocateprimaryaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommandAllocatePrimaryAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L78-L84 type CommandAllocatePrimaryAction struct { // AcceptDataLoss If a node which has a copy of the data rejoins the cluster later on, that // data will be deleted. 
To ensure that these implications are well-understood, @@ -115,3 +115,13 @@ func NewCommandAllocatePrimaryAction() *CommandAllocatePrimaryAction { return r } + +// true + +type CommandAllocatePrimaryActionVariant interface { + CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction +} + +func (s *CommandAllocatePrimaryAction) CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction { + return s +} diff --git a/typedapi/types/commandallocatereplicaaction.go b/typedapi/types/commandallocatereplicaaction.go index 26a7b1e6ab..8a1f68f678 100644 --- a/typedapi/types/commandallocatereplicaaction.go +++ b/typedapi/types/commandallocatereplicaaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommandAllocateReplicaAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L69-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L69-L76 type CommandAllocateReplicaAction struct { Index string `json:"index"` Node string `json:"node"` @@ -97,3 +97,13 @@ func NewCommandAllocateReplicaAction() *CommandAllocateReplicaAction { return r } + +// true + +type CommandAllocateReplicaActionVariant interface { + CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction +} + +func (s *CommandAllocateReplicaAction) CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction { + return s +} diff --git a/typedapi/types/commandcancelaction.go b/typedapi/types/commandcancelaction.go index d674c549fd..e12f5f683f 100644 --- a/typedapi/types/commandcancelaction.go +++ b/typedapi/types/commandcancelaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommandCancelAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L45-L50 type CommandCancelAction struct { AllowPrimary *bool `json:"allow_primary,omitempty"` Index string `json:"index"` @@ -112,3 +112,13 @@ func NewCommandCancelAction() *CommandCancelAction { return r } + +// true + +type CommandCancelActionVariant interface { + CommandCancelActionCaster() *CommandCancelAction +} + +func (s *CommandCancelAction) CommandCancelActionCaster() *CommandCancelAction { + return s +} diff --git a/typedapi/types/commandmoveaction.go b/typedapi/types/commandmoveaction.go index 388db1df71..cf20ec154c 100644 --- a/typedapi/types/commandmoveaction.go +++ b/typedapi/types/commandmoveaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommandMoveAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L60-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L60-L67 type CommandMoveAction struct { // FromNode The node to move the shard from FromNode string `json:"from_node"` @@ -112,3 +112,13 @@ func NewCommandMoveAction() *CommandMoveAction { return r } + +// true + +type CommandMoveActionVariant interface { + CommandMoveActionCaster() *CommandMoveAction +} + +func (s *CommandMoveAction) CommandMoveActionCaster() *CommandMoveAction { + return s +} diff --git a/typedapi/types/commoncatqueryparameters.go b/typedapi/types/commoncatqueryparameters.go deleted file mode 100644 index 4984b25220..0000000000 --- a/typedapi/types/commoncatqueryparameters.go +++ /dev/null @@ -1,175 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// CommonCatQueryParameters type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/behaviors.ts#L86-L132 -type CommonCatQueryParameters struct { - // Format Specifies the format to return the columnar data in, can be set to - // `text`, `json`, `cbor`, `yaml`, or `smile`. - Format *string `json:"format,omitempty"` - // H List of columns to appear in the response. Supports simple wildcards. - H []string `json:"h,omitempty"` - // Help When set to `true` will output available columns. This option - // can't be combined with any other query string option. - Help *bool `json:"help,omitempty"` - // Local If `true`, the request computes the list of selected nodes from the - // local cluster state. If `false` the list of selected nodes are computed - // from the cluster state of the master node. In both cases the coordinating - // node will send requests for further information to each selected node. - Local *bool `json:"local,omitempty"` - // MasterTimeout Period to wait for a connection to the master node. - MasterTimeout Duration `json:"master_timeout,omitempty"` - // S List of columns that determine how the table should be sorted. - // Sorting defaults to ascending and can be changed by setting `:asc` - // or `:desc` as a suffix to the column name. - S []string `json:"s,omitempty"` - // V When set to `true` will enable verbose output. 
- V *bool `json:"v,omitempty"` -} - -func (s *CommonCatQueryParameters) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "format": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Format", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Format = &o - - case "h": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "H", err) - } - - s.H = append(s.H, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.H); err != nil { - return fmt.Errorf("%s | %w", "H", err) - } - } - - case "help": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Help", err) - } - s.Help = &value - case bool: - s.Help = &v - } - - case "local": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Local", err) - } - s.Local = &value - case bool: - s.Local = &v - } - - case "master_timeout": - if err := dec.Decode(&s.MasterTimeout); err != nil { - return fmt.Errorf("%s | %w", "MasterTimeout", err) - } - - case "s": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "S", err) - } - - s.S = append(s.S, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.S); err != nil { - return fmt.Errorf("%s | %w", "S", err) - } - 
} - - case "v": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "V", err) - } - s.V = &value - case bool: - s.V = &v - } - - } - } - return nil -} - -// NewCommonCatQueryParameters returns a CommonCatQueryParameters. -func NewCommonCatQueryParameters() *CommonCatQueryParameters { - r := &CommonCatQueryParameters{} - - return r -} diff --git a/typedapi/types/commongramstokenfilter.go b/typedapi/types/commongramstokenfilter.go index b9b2c8a97a..a7efc811cb 100644 --- a/typedapi/types/commongramstokenfilter.go +++ b/typedapi/types/commongramstokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommonGramsTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L174-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L174-L180 type CommonGramsTokenFilter struct { CommonWords []string `json:"common_words,omitempty"` CommonWordsPath *string `json:"common_words_path,omitempty"` @@ -139,3 +139,13 @@ func NewCommonGramsTokenFilter() *CommonGramsTokenFilter { return r } + +// true + +type CommonGramsTokenFilterVariant interface { + CommonGramsTokenFilterCaster() *CommonGramsTokenFilter +} + +func (s *CommonGramsTokenFilter) CommonGramsTokenFilterCaster() *CommonGramsTokenFilter { + return s +} diff --git a/typedapi/types/commonqueryparameters.go b/typedapi/types/commonqueryparameters.go deleted file mode 100644 index 654c7911fb..0000000000 --- a/typedapi/types/commonqueryparameters.go +++ /dev/null @@ -1,137 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// CommonQueryParameters type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/behaviors.ts#L50-L84 -type CommonQueryParameters struct { - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors - // when they occur. - ErrorTrace *bool `json:"error_trace,omitempty"` - // FilterPath Comma-separated list of filters in dot notation which reduce the response - // returned by Elasticsearch. - FilterPath []string `json:"filter_path,omitempty"` - // Human When set to `true` will return statistics in a format suitable for humans. - // For example `"exists_time": "1h"` for humans and - // `"eixsts_time_in_millis": 3600000` for computers. When disabled the human - // readable values will be omitted. This makes sense for responses being - // consumed - // only by machines. - Human *bool `json:"human,omitempty"` - // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use - // this option for debugging only. 
- Pretty *bool `json:"pretty,omitempty"` -} - -func (s *CommonQueryParameters) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "error_trace": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "ErrorTrace", err) - } - s.ErrorTrace = &value - case bool: - s.ErrorTrace = &v - } - - case "filter_path": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "FilterPath", err) - } - - s.FilterPath = append(s.FilterPath, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.FilterPath); err != nil { - return fmt.Errorf("%s | %w", "FilterPath", err) - } - } - - case "human": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Human", err) - } - s.Human = &value - case bool: - s.Human = &v - } - - case "pretty": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Pretty", err) - } - s.Pretty = &value - case bool: - s.Pretty = &v - } - - } - } - return nil -} - -// NewCommonQueryParameters returns a CommonQueryParameters. -func NewCommonQueryParameters() *CommonQueryParameters { - r := &CommonQueryParameters{} - - return r -} diff --git a/typedapi/types/commontermsquery.go b/typedapi/types/commontermsquery.go index f77020463c..e168407f99 100644 --- a/typedapi/types/commontermsquery.go +++ b/typedapi/types/commontermsquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CommonTermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L34-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L34-L44 type CommonTermsQuery struct { Analyzer *string `json:"analyzer,omitempty"` // Boost Floating point number used to decrease or increase the relevance scores of @@ -171,3 +171,13 @@ func NewCommonTermsQuery() *CommonTermsQuery { return r } + +// true + +type CommonTermsQueryVariant interface { + CommonTermsQueryCaster() *CommonTermsQuery +} + +func (s *CommonTermsQuery) CommonTermsQueryCaster() *CommonTermsQuery { + return s +} diff --git a/typedapi/types/communityidprocessor.go b/typedapi/types/communityidprocessor.go index fcd9491f8f..b372955284 100644 --- a/typedapi/types/communityidprocessor.go +++ b/typedapi/types/communityidprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CommunityIDProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L558-L619 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L599-L660 type CommunityIDProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -47,7 +47,7 @@ type CommunityIDProcessor struct { // IcmpType Field containing the ICMP type. IcmpType *string `json:"icmp_type,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If true and any required fields are missing, the processor quietly exits @@ -128,16 +128,9 @@ func (s *CommunityIDProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -231,3 +224,13 @@ func NewCommunityIDProcessor() *CommunityIDProcessor { return r } + +// true + +type CommunityIDProcessorVariant interface { + CommunityIDProcessorCaster() *CommunityIDProcessor +} + +func (s *CommunityIDProcessor) CommunityIDProcessorCaster() *CommunityIDProcessor { + return s +} diff --git a/typedapi/types/compactnodeinfo.go b/typedapi/types/compactnodeinfo.go index 316bf52087..47b24af5ce 100644 --- a/typedapi/types/compactnodeinfo.go +++ b/typedapi/types/compactnodeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,11 @@ import ( // CompactNodeInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L33-L40 type CompactNodeInfo struct { + // Name A human-readable name for the node. + // You can set this name using the `node.name` property in `elasticsearch.yml`. + // The default value is the machine's hostname. Name string `json:"name"` } @@ -66,3 +69,5 @@ func NewCompactNodeInfo() *CompactNodeInfo { return r } + +// false diff --git a/typedapi/types/completioncontext.go b/typedapi/types/completioncontext.go index ca8c204eba..89e524ff93 100644 --- a/typedapi/types/completioncontext.go +++ b/typedapi/types/completioncontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L235-L264 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L235-L264 type CompletionContext struct { // Boost The factor by which the score of the suggestion should be boosted. // The score is computed by multiplying the boost with the suggestion weight. @@ -129,3 +129,13 @@ func NewCompletionContext() *CompletionContext { return r } + +// true + +type CompletionContextVariant interface { + CompletionContextCaster() *CompletionContext +} + +func (s *CompletionContext) CompletionContextCaster() *CompletionContext { + return s +} diff --git a/typedapi/types/completionproperty.go b/typedapi/types/completionproperty.go index b4b646afd0..a0e58c4f85 100644 --- a/typedapi/types/completionproperty.go +++ b/typedapi/types/completionproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // CompletionProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L33-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L33-L41 type CompletionProperty struct { Analyzer *string `json:"analyzer,omitempty"` Contexts []SuggestContext `json:"contexts,omitempty"` @@ -44,13 +45,14 @@ type CompletionProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` MaxInputLength *int `json:"max_input_length,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - PreservePositionIncrements *bool `json:"preserve_position_increments,omitempty"` - PreserveSeparators *bool `json:"preserve_separators,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PreservePositionIncrements *bool `json:"preserve_position_increments,omitempty"` + PreserveSeparators *bool `json:"preserve_separators,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *CompletionProperty) UnmarshalJSON(data []byte) error { @@ -139,301 +141,313 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -526,301 +540,313 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := 
NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -852,6 +878,11 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", 
"Type", err) @@ -880,6 +911,7 @@ func (s CompletionProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, SearchAnalyzer: s.SearchAnalyzer, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -891,10 +923,20 @@ func (s CompletionProperty) MarshalJSON() ([]byte, error) { // NewCompletionProperty returns a CompletionProperty. func NewCompletionProperty() *CompletionProperty { r := &CompletionProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type CompletionPropertyVariant interface { + CompletionPropertyCaster() *CompletionProperty +} + +func (s *CompletionProperty) CompletionPropertyCaster() *CompletionProperty { + return s +} diff --git a/typedapi/types/completionresult.go b/typedapi/types/completionresult.go index 4ac27cabd2..d94d941eb6 100644 --- a/typedapi/types/completionresult.go +++ b/typedapi/types/completionresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L60-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L77-L82 type CompletionResult struct { Result string `json:"result"` } @@ -74,3 +74,5 @@ func NewCompletionResult() *CompletionResult { return r } + +// false diff --git a/typedapi/types/completionstats.go b/typedapi/types/completionstats.go index b618c23a34..42a0efd251 100644 --- a/typedapi/types/completionstats.go +++ b/typedapi/types/completionstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L80-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L83-L93 type CompletionStats struct { Fields map[string]FieldSizeUsage `json:"fields,omitempty"` // Size Total amount of memory used for completion across all shards assigned to @@ -93,8 +93,10 @@ func (s *CompletionStats) UnmarshalJSON(data []byte) error { // NewCompletionStats returns a CompletionStats. 
func NewCompletionStats() *CompletionStats { r := &CompletionStats{ - Fields: make(map[string]FieldSizeUsage, 0), + Fields: make(map[string]FieldSizeUsage), } return r } + +// false diff --git a/typedapi/types/completionsuggest.go b/typedapi/types/completionsuggest.go index aca1a37cdb..4180367123 100644 --- a/typedapi/types/completionsuggest.go +++ b/typedapi/types/completionsuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionSuggest type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L48-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L48-L55 type CompletionSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewCompletionSuggest() *CompletionSuggest { return r } + +// false diff --git a/typedapi/types/completionsuggester.go b/typedapi/types/completionsuggester.go index 60e17d0738..c1eb854d96 100644 --- a/typedapi/types/completionsuggester.go +++ b/typedapi/types/completionsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionSuggester type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L163-L181 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L163-L181 type CompletionSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. @@ -158,8 +158,18 @@ func (s *CompletionSuggester) UnmarshalJSON(data []byte) error { // NewCompletionSuggester returns a CompletionSuggester. func NewCompletionSuggester() *CompletionSuggester { r := &CompletionSuggester{ - Contexts: make(map[string][]CompletionContext, 0), + Contexts: make(map[string][]CompletionContext), } return r } + +// true + +type CompletionSuggesterVariant interface { + CompletionSuggesterCaster() *CompletionSuggester +} + +func (s *CompletionSuggester) CompletionSuggesterCaster() *CompletionSuggester { + return s +} diff --git a/typedapi/types/completionsuggestoption.go b/typedapi/types/completionsuggestoption.go index be88d8b56c..d270490a17 100644 --- a/typedapi/types/completionsuggestoption.go +++ b/typedapi/types/completionsuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompletionSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L73-L84 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L73-L84 type CompletionSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Contexts map[string][]Context `json:"contexts,omitempty"` @@ -169,9 +169,11 @@ func (s *CompletionSuggestOption) UnmarshalJSON(data []byte) error { // NewCompletionSuggestOption returns a CompletionSuggestOption. func NewCompletionSuggestOption() *CompletionSuggestOption { r := &CompletionSuggestOption{ - Contexts: make(map[string][]Context, 0), - Fields: make(map[string]json.RawMessage, 0), + Contexts: make(map[string][]Context), + Fields: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/completiontool.go b/typedapi/types/completiontool.go new file mode 100644 index 0000000000..227f0bab0c --- /dev/null +++ b/typedapi/types/completiontool.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionTool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L215-L227 +type CompletionTool struct { + // Function The function definition. + Function CompletionToolFunction `json:"function"` + // Type The type of tool. + Type string `json:"type"` +} + +func (s *CompletionTool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionTool returns a CompletionTool. +func NewCompletionTool() *CompletionTool { + r := &CompletionTool{} + + return r +} + +// true + +type CompletionToolVariant interface { + CompletionToolCaster() *CompletionTool +} + +func (s *CompletionTool) CompletionToolCaster() *CompletionTool { + return s +} diff --git a/typedapi/types/completiontoolchoice.go b/typedapi/types/completiontoolchoice.go new file mode 100644 index 0000000000..ea607eab0e --- /dev/null +++ b/typedapi/types/completiontoolchoice.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoice type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L178-L190 +type CompletionToolChoice struct { + // Function The tool choice function. + Function CompletionToolChoiceFunction `json:"function"` + // Type The type of the tool. + Type string `json:"type"` +} + +func (s *CompletionToolChoice) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionToolChoice returns a CompletionToolChoice. 
+func NewCompletionToolChoice() *CompletionToolChoice { + r := &CompletionToolChoice{} + + return r +} + +// true + +type CompletionToolChoiceVariant interface { + CompletionToolChoiceCaster() *CompletionToolChoice +} + +func (s *CompletionToolChoice) CompletionToolChoiceCaster() *CompletionToolChoice { + return s +} diff --git a/typedapi/types/completiontoolchoicefunction.go b/typedapi/types/completiontoolchoicefunction.go new file mode 100644 index 0000000000..9f529f07a6 --- /dev/null +++ b/typedapi/types/completiontoolchoicefunction.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoiceFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L167-L176 +type CompletionToolChoiceFunction struct { + // Name The name of the function to call. 
+ Name string `json:"name"` +} + +func (s *CompletionToolChoiceFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewCompletionToolChoiceFunction returns a CompletionToolChoiceFunction. +func NewCompletionToolChoiceFunction() *CompletionToolChoiceFunction { + r := &CompletionToolChoiceFunction{} + + return r +} + +// true + +type CompletionToolChoiceFunctionVariant interface { + CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction +} + +func (s *CompletionToolChoiceFunction) CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction { + return s +} diff --git a/typedapi/types/completiontoolfunction.go b/typedapi/types/completiontoolfunction.go new file mode 100644 index 0000000000..fffc5e6e00 --- /dev/null +++ b/typedapi/types/completiontoolfunction.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L192-L213 +type CompletionToolFunction struct { + // Description A description of what the function does. + // This is used by the model to choose when and how to call the function. + Description *string `json:"description,omitempty"` + // Name The name of the function. + Name string `json:"name"` + // Parameters The parameters the functional accepts. This should be formatted as a JSON + // object. + Parameters json.RawMessage `json:"parameters,omitempty"` + // Strict Whether to enable schema adherence when generating the function call. 
+ Strict *bool `json:"strict,omitempty"` +} + +func (s *CompletionToolFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return fmt.Errorf("%s | %w", "Parameters", err) + } + + case "strict": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Strict", err) + } + s.Strict = &value + case bool: + s.Strict = &v + } + + } + } + return nil +} + +// NewCompletionToolFunction returns a CompletionToolFunction. +func NewCompletionToolFunction() *CompletionToolFunction { + r := &CompletionToolFunction{} + + return r +} + +// true + +type CompletionToolFunctionVariant interface { + CompletionToolFunctionCaster() *CompletionToolFunction +} + +func (s *CompletionToolFunction) CompletionToolFunctionCaster() *CompletionToolFunction { + return s +} diff --git a/typedapi/types/completiontooltype.go b/typedapi/types/completiontooltype.go new file mode 100644 index 0000000000..a888e41bd0 --- /dev/null +++ b/typedapi/types/completiontooltype.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// CompletionToolType holds the union for the following types: +// +// string +// CompletionToolChoice +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L89-L92 +type CompletionToolType any + +type CompletionToolTypeVariant interface { + CompletionToolTypeCaster() *CompletionToolType +} diff --git a/typedapi/types/componenttemplatenode.go b/typedapi/types/componenttemplatenode.go index 9953826616..9de440879b 100644 --- a/typedapi/types/componenttemplatenode.go +++ b/typedapi/types/componenttemplatenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,15 +26,17 @@ import ( "errors" "fmt" "io" + "strconv" ) // ComponentTemplateNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/_types/ComponentTemplate.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/_types/ComponentTemplate.ts#L32-L41 type ComponentTemplateNode struct { - Meta_ Metadata `json:"_meta,omitempty"` - Template ComponentTemplateSummary `json:"template"` - Version *int64 `json:"version,omitempty"` + Deprecated *bool `json:"deprecated,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Template ComponentTemplateSummary `json:"template"` + Version *int64 `json:"version,omitempty"` } func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { @@ -52,6 +54,20 @@ func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { switch t { + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + case "_meta": if err := dec.Decode(&s.Meta_); err != nil { return fmt.Errorf("%s | %w", "Meta_", err) @@ -78,3 +94,13 @@ func NewComponentTemplateNode() *ComponentTemplateNode { return r } + +// true + +type ComponentTemplateNodeVariant interface { + ComponentTemplateNodeCaster() *ComponentTemplateNode +} + +func (s *ComponentTemplateNode) ComponentTemplateNodeCaster() *ComponentTemplateNode { + return s +} diff --git a/typedapi/types/componenttemplatesummary.go b/typedapi/types/componenttemplatesummary.go index d70c4fc534..90d0a0620d 100644 --- a/typedapi/types/componenttemplatesummary.go +++ b/typedapi/types/componenttemplatesummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ComponentTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/_types/ComponentTemplate.ts#L39-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/_types/ComponentTemplate.ts#L43-L55 type ComponentTemplateSummary struct { Aliases map[string]AliasDefinition `json:"aliases,omitempty"` Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` @@ -99,9 +99,19 @@ func (s *ComponentTemplateSummary) UnmarshalJSON(data []byte) error { // NewComponentTemplateSummary returns a ComponentTemplateSummary. func NewComponentTemplateSummary() *ComponentTemplateSummary { r := &ComponentTemplateSummary{ - Aliases: make(map[string]AliasDefinition, 0), - Settings: make(map[string]IndexSettings, 0), + Aliases: make(map[string]AliasDefinition), + Settings: make(map[string]IndexSettings), } return r } + +// true + +type ComponentTemplateSummaryVariant interface { + ComponentTemplateSummaryCaster() *ComponentTemplateSummary +} + +func (s *ComponentTemplateSummary) ComponentTemplateSummaryCaster() *ComponentTemplateSummary { + return s +} diff --git a/typedapi/types/compositeaggregate.go b/typedapi/types/compositeaggregate.go index 933f49c6fd..8861e34353 100644 --- a/typedapi/types/compositeaggregate.go +++ b/typedapi/types/compositeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // CompositeAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L698-L703 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L698-L703 type CompositeAggregate struct { AfterKey CompositeAggregateKey `json:"after_key,omitempty"` Buckets BucketsCompositeBucket `json:"buckets"` @@ -94,3 +94,5 @@ func NewCompositeAggregate() *CompositeAggregate { return r } + +// false diff --git a/typedapi/types/compositeaggregatekey.go b/typedapi/types/compositeaggregatekey.go index d7a0184b6e..99f026733d 100644 --- a/typedapi/types/compositeaggregatekey.go +++ b/typedapi/types/compositeaggregatekey.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // CompositeAggregateKey type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L128-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L128-L128 type CompositeAggregateKey map[string]FieldValue + +type CompositeAggregateKeyVariant interface { + CompositeAggregateKeyCaster() *CompositeAggregateKey +} diff --git a/typedapi/types/compositeaggregation.go b/typedapi/types/compositeaggregation.go index 833e000eaf..9c2b6a49a5 100644 --- a/typedapi/types/compositeaggregation.go +++ b/typedapi/types/compositeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CompositeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L130-L149 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L130-L149 type CompositeAggregation struct { // After When paginating, use the `after_key` value returned in the previous response // to retrieve the next page. 
@@ -95,3 +95,13 @@ func NewCompositeAggregation() *CompositeAggregation { return r } + +// true + +type CompositeAggregationVariant interface { + CompositeAggregationCaster() *CompositeAggregation +} + +func (s *CompositeAggregation) CompositeAggregationCaster() *CompositeAggregation { + return s +} diff --git a/typedapi/types/compositeaggregationsource.go b/typedapi/types/compositeaggregationsource.go index 965dcd1f4b..8ac9bf6bb5 100644 --- a/typedapi/types/compositeaggregationsource.go +++ b/typedapi/types/compositeaggregationsource.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // CompositeAggregationSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L151-L168 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L151-L168 type CompositeAggregationSource struct { // DateHistogram A date histogram aggregation. DateHistogram *CompositeDateHistogramAggregation `json:"date_histogram,omitempty"` @@ -40,3 +40,13 @@ func NewCompositeAggregationSource() *CompositeAggregationSource { return r } + +// true + +type CompositeAggregationSourceVariant interface { + CompositeAggregationSourceCaster() *CompositeAggregationSource +} + +func (s *CompositeAggregationSource) CompositeAggregationSourceCaster() *CompositeAggregationSource { + return s +} diff --git a/typedapi/types/compositebucket.go b/typedapi/types/compositebucket.go index f90deaf241..df11677f19 100644 --- a/typedapi/types/compositebucket.go +++ b/typedapi/types/compositebucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // CompositeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L705-L707 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L705-L707 type CompositeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s CompositeBucket) MarshalJSON() ([]byte, error) { // NewCompositeBucket returns a CompositeBucket. func NewCompositeBucket() *CompositeBucket { r := &CompositeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/compositedatehistogramaggregation.go b/typedapi/types/compositedatehistogramaggregation.go index ea35068c9c..07867fd0f5 100644 --- a/typedapi/types/compositedatehistogramaggregation.go +++ b/typedapi/types/compositedatehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // CompositeDateHistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L187-L195 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L187-L195 type CompositeDateHistogramAggregation struct { // CalendarInterval Either `calendar_interval` or `fixed_interval` must be present CalendarInterval *string `json:"calendar_interval,omitempty"` @@ -151,3 +151,13 @@ func NewCompositeDateHistogramAggregation() *CompositeDateHistogramAggregation { return r } + +// true + +type CompositeDateHistogramAggregationVariant interface { + CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation +} + +func (s *CompositeDateHistogramAggregation) CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation { + return s +} diff --git a/typedapi/types/compositegeotilegridaggregation.go b/typedapi/types/compositegeotilegridaggregation.go index 1b048dee1a..f664d8d754 100644 --- a/typedapi/types/compositegeotilegridaggregation.go +++ b/typedapi/types/compositegeotilegridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // CompositeGeoTileGridAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L197-L200 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L197-L200 type CompositeGeoTileGridAggregation struct { Bounds GeoBounds `json:"bounds,omitempty"` // Field Either `field` or `script` must be present @@ -193,3 +193,13 @@ func NewCompositeGeoTileGridAggregation() *CompositeGeoTileGridAggregation { return r } + +// true + +type CompositeGeoTileGridAggregationVariant interface { + CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation +} + +func (s *CompositeGeoTileGridAggregation) CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation { + return s +} diff --git a/typedapi/types/compositehistogramaggregation.go b/typedapi/types/compositehistogramaggregation.go index 5a1738a1fa..46091dda6d 100644 --- a/typedapi/types/compositehistogramaggregation.go +++ b/typedapi/types/compositehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // CompositeHistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L183-L185 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L183-L185 type CompositeHistogramAggregation struct { // Field Either `field` or `script` must be present Field *string `json:"field,omitempty"` @@ -129,3 +129,13 @@ func NewCompositeHistogramAggregation() *CompositeHistogramAggregation { return r } + +// true + +type CompositeHistogramAggregationVariant interface { + CompositeHistogramAggregationCaster() *CompositeHistogramAggregation +} + +func (s *CompositeHistogramAggregation) CompositeHistogramAggregationCaster() *CompositeHistogramAggregation { + return s +} diff --git a/typedapi/types/compositesubfield.go b/typedapi/types/compositesubfield.go index 58895a93f1..3f8464f004 100644 --- a/typedapi/types/compositesubfield.go +++ b/typedapi/types/compositesubfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // CompositeSubField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/RuntimeFields.ts#L52-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/RuntimeFields.ts#L52-L54 type CompositeSubField struct { Type runtimefieldtype.RuntimeFieldType `json:"type"` } @@ -37,3 +37,13 @@ func NewCompositeSubField() *CompositeSubField { return r } + +// true + +type CompositeSubFieldVariant interface { + CompositeSubFieldCaster() *CompositeSubField +} + +func (s *CompositeSubField) CompositeSubFieldCaster() *CompositeSubField { + return s +} diff --git a/typedapi/types/compositetermsaggregation.go b/typedapi/types/compositetermsaggregation.go index bbc81e93bf..6d720ea7fc 100644 --- a/typedapi/types/compositetermsaggregation.go +++ b/typedapi/types/compositetermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // CompositeTermsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L181-L181 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L181-L181 type CompositeTermsAggregation struct { // Field Either `field` or `script` must be present Field *string `json:"field,omitempty"` @@ -112,3 +112,13 @@ func NewCompositeTermsAggregation() *CompositeTermsAggregation { return r } + +// true + +type CompositeTermsAggregationVariant interface { + CompositeTermsAggregationCaster() *CompositeTermsAggregation +} + +func (s *CompositeTermsAggregation) CompositeTermsAggregationCaster() *CompositeTermsAggregation { + return s +} diff --git a/typedapi/types/conditiontokenfilter.go b/typedapi/types/conditiontokenfilter.go index 9e6cfe4698..b33f5ce2be 100644 --- a/typedapi/types/conditiontokenfilter.go +++ b/typedapi/types/conditiontokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ConditionTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L182-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L182-L186 type ConditionTokenFilter struct { Filter []string `json:"filter"` Script Script `json:"script"` @@ -99,3 +99,13 @@ func NewConditionTokenFilter() *ConditionTokenFilter { return r } + +// true + +type ConditionTokenFilterVariant interface { + ConditionTokenFilterCaster() *ConditionTokenFilter +} + +func (s *ConditionTokenFilter) ConditionTokenFilterCaster() *ConditionTokenFilter { + return s +} diff --git a/typedapi/types/configuration.go b/typedapi/types/configuration.go index 7b4dbac1be..02674260b2 100644 --- a/typedapi/types/configuration.go +++ b/typedapi/types/configuration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Configuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L99-L129 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L109-L139 type Configuration struct { // FeatureStates A list of feature states to be included in this snapshot. 
A list of features // available for inclusion in the snapshot and their descriptions be can be @@ -158,3 +158,13 @@ func NewConfiguration() *Configuration { return r } + +// true + +type ConfigurationVariant interface { + ConfigurationCaster() *Configuration +} + +func (s *Configuration) ConfigurationCaster() *Configuration { + return s +} diff --git a/typedapi/types/confusionmatrixitem.go b/typedapi/types/confusionmatrixitem.go index 7c316856f6..840fa40836 100644 --- a/typedapi/types/confusionmatrixitem.go +++ b/typedapi/types/confusionmatrixitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L125-L130 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L125-L130 type ConfusionMatrixItem struct { ActualClass string `json:"actual_class"` ActualClassDocCount int `json:"actual_class_doc_count"` @@ -107,3 +107,5 @@ func NewConfusionMatrixItem() *ConfusionMatrixItem { return r } + +// false diff --git a/typedapi/types/confusionmatrixprediction.go b/typedapi/types/confusionmatrixprediction.go index cc098ac8bc..c9827ca2c2 100644 --- a/typedapi/types/confusionmatrixprediction.go +++ b/typedapi/types/confusionmatrixprediction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixPrediction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L132-L135 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L132-L135 type ConfusionMatrixPrediction struct { Count int `json:"count"` PredictedClass string `json:"predicted_class"` @@ -84,3 +84,5 @@ func NewConfusionMatrixPrediction() *ConfusionMatrixPrediction { return r } + +// false diff --git a/typedapi/types/confusionmatrixthreshold.go b/typedapi/types/confusionmatrixthreshold.go index cfbfc9ca65..8ee9dcc1a0 100644 --- a/typedapi/types/confusionmatrixthreshold.go +++ b/typedapi/types/confusionmatrixthreshold.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixThreshold type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L137-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L137-L158 type ConfusionMatrixThreshold struct { // FalseNegative False Negative FalseNegative int `json:"fn"` @@ -133,3 +133,5 @@ func NewConfusionMatrixThreshold() *ConfusionMatrixThreshold { return r } + +// false diff --git a/typedapi/types/connection.go b/typedapi/types/connection.go index 5fdf832885..9f8de04843 100644 --- a/typedapi/types/connection.go +++ b/typedapi/types/connection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Connection type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/Connection.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/Connection.ts#L22-L27 type Connection struct { DocCount int64 `json:"doc_count"` Source int64 `json:"source"` @@ -126,3 +126,5 @@ func NewConnection() *Connection { return r } + +// false diff --git a/typedapi/types/connector.go b/typedapi/types/connector.go index 338d9814a9..9f6ed82e90 100644 --- a/typedapi/types/connector.go +++ b/typedapi/types/connector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,12 +34,13 @@ import ( // Connector type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L237-L268 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L252-L284 type Connector struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` Configuration ConnectorConfiguration `json:"configuration"` CustomScheduling ConnectorCustomScheduling `json:"custom_scheduling"` + Deleted bool `json:"deleted"` Description *string `json:"description,omitempty"` Error *string `json:"error,omitempty"` Features *ConnectorFeatures `json:"features,omitempty"` @@ -117,6 +118,20 @@ func (s *Connector) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "CustomScheduling", err) } + case "deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deleted", err) + } + s.Deleted = value + case bool: + s.Deleted = v + } + case "description": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -345,3 +360,5 @@ func NewConnector() *Connector { return r } + +// false diff --git a/typedapi/types/connectorconfigproperties.go b/typedapi/types/connectorconfigproperties.go index 93e68c6d31..d593963af4 100644 --- a/typedapi/types/connectorconfigproperties.go +++ b/typedapi/types/connectorconfigproperties.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // ConnectorConfigProperties type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L83-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L83-L99 type ConnectorConfigProperties struct { Category *string `json:"category,omitempty"` DefaultValue ScalarValue `json:"default_value"` @@ -207,37 +207,37 @@ func (s *ConnectorConfigProperties) UnmarshalJSON(data []byte) error { case "less_than": o := NewLessThanValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "less_than", err) } s.Validations = append(s.Validations, *o) case "greater_than": o := NewGreaterThanValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "greater_than", err) } s.Validations = append(s.Validations, *o) case "list_type": o := NewListTypeValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "list_type", err) } s.Validations = append(s.Validations, *o) case "included_in": o := NewIncludedInValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "included_in", err) } s.Validations = append(s.Validations, *o) case "regex": o := NewRegexValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "regex", err) } s.Validations = append(s.Validations, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Validations | %w", err) } s.Validations = append(s.Validations, *o) } @@ -259,3 
+259,13 @@ func NewConnectorConfigProperties() *ConnectorConfigProperties { return r } + +// true + +type ConnectorConfigPropertiesVariant interface { + ConnectorConfigPropertiesCaster() *ConnectorConfigProperties +} + +func (s *ConnectorConfigProperties) ConnectorConfigPropertiesCaster() *ConnectorConfigProperties { + return s +} diff --git a/typedapi/types/connectorconfiguration.go b/typedapi/types/connectorconfiguration.go index 11b981bf67..203f8d9112 100644 --- a/typedapi/types/connectorconfiguration.go +++ b/typedapi/types/connectorconfiguration.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ConnectorConfiguration type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L101-L104 type ConnectorConfiguration map[string]ConnectorConfigProperties + +type ConnectorConfigurationVariant interface { + ConnectorConfigurationCaster() *ConnectorConfiguration +} diff --git a/typedapi/types/connectorcustomscheduling.go b/typedapi/types/connectorcustomscheduling.go index f2c6b09a75..1ceb3f986e 100644 --- a/typedapi/types/connectorcustomscheduling.go +++ b/typedapi/types/connectorcustomscheduling.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ConnectorCustomScheduling type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L128-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L128-L128 type ConnectorCustomScheduling map[string]CustomScheduling diff --git a/typedapi/types/connectorfeatures.go b/typedapi/types/connectorfeatures.go index 258181ab6a..6f134144d8 100644 --- a/typedapi/types/connectorfeatures.go +++ b/typedapi/types/connectorfeatures.go @@ -16,16 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ConnectorFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L224-L229 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L230-L244 type ConnectorFeatures struct { - DocumentLevelSecurity *FeatureEnabled `json:"document_level_security,omitempty"` - IncrementalSync *FeatureEnabled `json:"incremental_sync,omitempty"` + // DocumentLevelSecurity Indicates whether document-level security is enabled. + DocumentLevelSecurity *FeatureEnabled `json:"document_level_security,omitempty"` + // IncrementalSync Indicates whether incremental syncs are enabled. 
+ IncrementalSync *FeatureEnabled `json:"incremental_sync,omitempty"` + // NativeConnectorApiKeys Indicates whether managed connector API keys are enabled. NativeConnectorApiKeys *FeatureEnabled `json:"native_connector_api_keys,omitempty"` SyncRules *SyncRulesFeature `json:"sync_rules,omitempty"` } @@ -36,3 +39,13 @@ func NewConnectorFeatures() *ConnectorFeatures { return r } + +// true + +type ConnectorFeaturesVariant interface { + ConnectorFeaturesCaster() *ConnectorFeatures +} + +func (s *ConnectorFeatures) ConnectorFeaturesCaster() *ConnectorFeatures { + return s +} diff --git a/typedapi/types/connectorscheduling.go b/typedapi/types/connectorscheduling.go index b718287b33..e9d7951efc 100644 --- a/typedapi/types/connectorscheduling.go +++ b/typedapi/types/connectorscheduling.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ConnectorScheduling type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L106-L110 type ConnectorScheduling struct { Enabled bool `json:"enabled"` // Interval The interval is expressed using the crontab syntax @@ -90,3 +90,13 @@ func NewConnectorScheduling() *ConnectorScheduling { return r } + +// true + +type ConnectorSchedulingVariant interface { + ConnectorSchedulingCaster() *ConnectorScheduling +} + +func (s *ConnectorScheduling) ConnectorSchedulingCaster() *ConnectorScheduling { + return s +} diff --git a/typedapi/types/connectorsyncjob.go b/typedapi/types/connectorsyncjob.go index 8adc2a59b3..3a4e899a49 100644 --- a/typedapi/types/connectorsyncjob.go +++ b/typedapi/types/connectorsyncjob.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // ConnectorSyncJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/SyncJob.ts#L53-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/SyncJob.ts#L53-L72 type ConnectorSyncJob struct { CancelationRequestedAt DateTime `json:"cancelation_requested_at,omitempty"` CanceledAt DateTime `json:"canceled_at,omitempty"` @@ -227,8 +227,10 @@ func (s *ConnectorSyncJob) UnmarshalJSON(data []byte) error { // NewConnectorSyncJob returns a ConnectorSyncJob. 
func NewConnectorSyncJob() *ConnectorSyncJob { r := &ConnectorSyncJob{ - Metadata: make(map[string]json.RawMessage, 0), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/constantkeywordproperty.go b/typedapi/types/constantkeywordproperty.go index d361063a0a..ba5ce17ba2 100644 --- a/typedapi/types/constantkeywordproperty.go +++ b/typedapi/types/constantkeywordproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ConstantKeywordProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L50-L53 type ConstantKeywordProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` - Value json.RawMessage `json:"value,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -427,306 +441,323 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -746,13 +777,14 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { type innerConstantKeywordProperty ConstantKeywordProperty tmp := innerConstantKeywordProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, - Value: s.Value, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + Value: s.Value, } tmp.Type = "constant_keyword" @@ -763,10 +795,20 @@ func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { // NewConstantKeywordProperty returns a ConstantKeywordProperty. 
func NewConstantKeywordProperty() *ConstantKeywordProperty { r := &ConstantKeywordProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ConstantKeywordPropertyVariant interface { + ConstantKeywordPropertyCaster() *ConstantKeywordProperty +} + +func (s *ConstantKeywordProperty) ConstantKeywordPropertyCaster() *ConstantKeywordProperty { + return s +} diff --git a/typedapi/types/constantscorequery.go b/typedapi/types/constantscorequery.go index bbe55d93ad..1a9bffba00 100644 --- a/typedapi/types/constantscorequery.go +++ b/typedapi/types/constantscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ConstantScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L76-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L76-L86 type ConstantScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -43,7 +43,7 @@ type ConstantScoreQuery struct { // Filter queries do not calculate relevance scores. // To speed up performance, Elasticsearch automatically caches frequently used // filter queries. 
- Filter *Query `json:"filter,omitempty"` + Filter Query `json:"filter"` QueryName_ *string `json:"_name,omitempty"` } @@ -106,3 +106,13 @@ func NewConstantScoreQuery() *ConstantScoreQuery { return r } + +// true + +type ConstantScoreQueryVariant interface { + ConstantScoreQueryCaster() *ConstantScoreQuery +} + +func (s *ConstantScoreQuery) ConstantScoreQueryCaster() *ConstantScoreQuery { + return s +} diff --git a/typedapi/types/contentobject.go b/typedapi/types/contentobject.go new file mode 100644 index 0000000000..eec9130b9b --- /dev/null +++ b/typedapi/types/contentobject.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ContentObject type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L94-L106 +type ContentObject struct { + // Text The text content. 
+ Text string `json:"text"` + // Type The type of content. + Type string `json:"type"` +} + +func (s *ContentObject) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewContentObject returns a ContentObject. +func NewContentObject() *ContentObject { + r := &ContentObject{} + + return r +} + +// true + +type ContentObjectVariant interface { + ContentObjectCaster() *ContentObject +} + +func (s *ContentObject) ContentObjectCaster() *ContentObject { + return s +} diff --git a/typedapi/types/context.go b/typedapi/types/context.go index bbe0f996d2..b2b9dcf240 100644 --- a/typedapi/types/context.go +++ b/typedapi/types/context.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // GeoLocation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L228-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L228-L233 type Context any + +type ContextVariant interface { + ContextCaster() *Context +} diff --git a/typedapi/types/contextmethod.go b/typedapi/types/contextmethod.go index b2bbcaee18..ac2c2555ad 100644 --- a/typedapi/types/contextmethod.go +++ b/typedapi/types/contextmethod.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ContextMethod type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_context/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_context/types.ts#L27-L31 type ContextMethod struct { Name string `json:"name"` Params []ContextMethodParam `json:"params"` @@ -86,3 +86,5 @@ func NewContextMethod() *ContextMethod { return r } + +// false diff --git a/typedapi/types/contextmethodparam.go b/typedapi/types/contextmethodparam.go index f750248b21..31a83db27f 100644 --- a/typedapi/types/contextmethodparam.go +++ b/typedapi/types/contextmethodparam.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ContextMethodParam type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_context/types.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_context/types.ts#L33-L36 type ContextMethodParam struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewContextMethodParam() *ContextMethodParam { return r } + +// false diff --git a/typedapi/types/convertprocessor.go b/typedapi/types/convertprocessor.go index 255f86fbc7..f094588ad9 100644 --- a/typedapi/types/convertprocessor.go +++ b/typedapi/types/convertprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ConvertProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L632-L652 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L673-L693 type ConvertProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -41,7 +41,7 @@ type ConvertProcessor struct { // Field The field whose value is to be converted. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -92,16 +92,9 @@ func (s *ConvertProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -169,3 +162,13 @@ func NewConvertProcessor() *ConvertProcessor { return r } + +// true + +type ConvertProcessorVariant interface { + ConvertProcessorCaster() *ConvertProcessor +} + +func (s *ConvertProcessor) ConvertProcessorCaster() *ConvertProcessor { + return s +} diff --git a/typedapi/types/coordinatorstats.go b/typedapi/types/coordinatorstats.go index c4173abd3d..e48f50a819 100644 --- a/typedapi/types/coordinatorstats.go +++ b/typedapi/types/coordinatorstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CoordinatorStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/stats/types.ts#L30-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/stats/types.ts#L30-L36 type CoordinatorStats struct { ExecutedSearchesTotal int64 `json:"executed_searches_total"` NodeId string `json:"node_id"` @@ -133,3 +133,5 @@ func NewCoordinatorStats() *CoordinatorStats { return r } + +// false diff --git a/typedapi/types/coordsgeobounds.go b/typedapi/types/coordsgeobounds.go index 1477a72b28..364f186ad0 100644 --- a/typedapi/types/coordsgeobounds.go +++ b/typedapi/types/coordsgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CoordsGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L154-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L154-L159 type CoordsGeoBounds struct { Bottom Float64 `json:"bottom"` Left Float64 `json:"left"` @@ -129,3 +129,13 @@ func NewCoordsGeoBounds() *CoordsGeoBounds { return r } + +// true + +type CoordsGeoBoundsVariant interface { + CoordsGeoBoundsCaster() *CoordsGeoBounds +} + +func (s *CoordsGeoBounds) CoordsGeoBoundsCaster() *CoordsGeoBounds { + return s +} diff --git a/typedapi/types/coreknnquery.go b/typedapi/types/coreknnquery.go index e9ad1c3302..13695f5632 100644 --- a/typedapi/types/coreknnquery.go +++ b/typedapi/types/coreknnquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CoreKnnQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/knn_search/_types/Knn.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/knn_search/_types/Knn.ts#L24-L33 type CoreKnnQuery struct { // Field The name of the vector field to search against Field string `json:"field"` @@ -111,3 +111,13 @@ func NewCoreKnnQuery() *CoreKnnQuery { return r } + +// true + +type CoreKnnQueryVariant interface { + CoreKnnQueryCaster() *CoreKnnQuery +} + +func (s *CoreKnnQuery) CoreKnnQueryCaster() *CoreKnnQuery { + return s +} diff --git a/typedapi/types/countedkeywordproperty.go b/typedapi/types/countedkeywordproperty.go new file mode 100644 index 0000000000..2ae9e8acde --- /dev/null +++ b/typedapi/types/countedkeywordproperty.go @@ -0,0 +1,823 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +// CountedKeywordProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L55-L62 +type CountedKeywordProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CountedKeywordProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } 
+ s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := 
NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo 
:= NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return 
fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CountedKeywordProperty) MarshalJSON() ([]byte, error) { + type innerCountedKeywordProperty CountedKeywordProperty + tmp := innerCountedKeywordProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "counted_keyword" + + return json.Marshal(tmp) +} + +// NewCountedKeywordProperty returns a CountedKeywordProperty. +func NewCountedKeywordProperty() *CountedKeywordProperty { + r := &CountedKeywordProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +// true + +type CountedKeywordPropertyVariant interface { + CountedKeywordPropertyCaster() *CountedKeywordProperty +} + +func (s *CountedKeywordProperty) CountedKeywordPropertyCaster() *CountedKeywordProperty { + return s +} diff --git a/typedapi/types/counter.go b/typedapi/types/counter.go index 9a251760c1..b790cd3188 100644 --- a/typedapi/types/counter.go +++ b/typedapi/types/counter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Counter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L32-L35 type Counter struct { Active int64 `json:"active"` Total int64 `json:"total"` @@ -93,3 +93,5 @@ func NewCounter() *Counter { return r } + +// false diff --git a/typedapi/types/countrecord.go b/typedapi/types/countrecord.go index 12147d1ad1..790d062788 100644 --- a/typedapi/types/countrecord.go +++ b/typedapi/types/countrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CountRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/count/types.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/count/types.ts#L23-L39 type CountRecord struct { // Count the document count Count *string `json:"count,omitempty"` @@ -89,3 +89,5 @@ func NewCountRecord() *CountRecord { return r } + +// false diff --git a/typedapi/types/cpu.go b/typedapi/types/cpu.go index 7cc3712f52..fe221d3abe 100644 --- a/typedapi/types/cpu.go +++ b/typedapi/types/cpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Cpu type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L575-L584 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L575-L584 type Cpu struct { LoadAverage map[string]Float64 `json:"load_average,omitempty"` Percent *int `json:"percent,omitempty"` @@ -120,8 +120,10 @@ func (s *Cpu) UnmarshalJSON(data []byte) error { // NewCpu returns a Cpu. func NewCpu() *Cpu { r := &Cpu{ - LoadAverage: make(map[string]Float64, 0), + LoadAverage: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/cpuacct.go b/typedapi/types/cpuacct.go index c112f68c39..37ba283ddd 100644 --- a/typedapi/types/cpuacct.go +++ b/typedapi/types/cpuacct.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CpuAcct type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L512-L521 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L512-L521 type CpuAcct struct { // ControlGroup The `cpuacct` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` @@ -83,3 +83,5 @@ func NewCpuAcct() *CpuAcct { return r } + +// false diff --git a/typedapi/types/createdstatus.go b/typedapi/types/createdstatus.go index 3ebe38bbdf..7f46523c51 100644 --- a/typedapi/types/createdstatus.go +++ b/typedapi/types/createdstatus.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CreatedStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/CreatedStatus.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/CreatedStatus.ts#L20-L22 type CreatedStatus struct { Created bool `json:"created"` } @@ -76,3 +76,5 @@ func NewCreatedStatus() *CreatedStatus { return r } + +// false diff --git a/typedapi/types/createfrom.go b/typedapi/types/createfrom.go new file mode 100644 index 0000000000..9076859ef4 --- /dev/null +++ b/typedapi/types/createfrom.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CreateFrom type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/create_from/MigrateCreateFromRequest.ts#L46-L60 +type CreateFrom struct { + // MappingsOverride Mappings overrides to be applied to the destination index (optional) + MappingsOverride *TypeMapping `json:"mappings_override,omitempty"` + // RemoveIndexBlocks If index blocks should be removed when creating destination index (optional) + RemoveIndexBlocks *bool `json:"remove_index_blocks,omitempty"` + // SettingsOverride Settings overrides to be applied to the destination index (optional) + SettingsOverride *IndexSettings `json:"settings_override,omitempty"` +} + +func (s *CreateFrom) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mappings_override": + if err := dec.Decode(&s.MappingsOverride); err != nil { + return fmt.Errorf("%s | %w", "MappingsOverride", err) + } + + case "remove_index_blocks": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveIndexBlocks", err) + } + s.RemoveIndexBlocks = &value + case bool: + s.RemoveIndexBlocks = &v + } + + case "settings_override": + if err := dec.Decode(&s.SettingsOverride); err != nil { + return fmt.Errorf("%s | %w", "SettingsOverride", err) + } + + } + } + return nil +} + +// NewCreateFrom returns a CreateFrom. 
+func NewCreateFrom() *CreateFrom { + r := &CreateFrom{} + + return r +} + +// true + +type CreateFromVariant interface { + CreateFromCaster() *CreateFrom +} + +func (s *CreateFrom) CreateFromCaster() *CreateFrom { + return s +} diff --git a/typedapi/types/createoperation.go b/typedapi/types/createoperation.go index 3110dd45a9..ad952cba29 100644 --- a/typedapi/types/createoperation.go +++ b/typedapi/types/createoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,30 +33,30 @@ import ( // CreateOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L130-L130 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L140-L140 type CreateOperation struct { // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. - // If a name matches a dynamic template, then that template will be applied + // It defaults to an empty map. + // If a name matches a dynamic template, that template will be applied // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be + // If a field is already defined in the mapping, then this parameter won't be // used. DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. 
+ // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value + // Pipeline The ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, setting the value to + // `_none` turns off the default ingest pipeline for this request. + // If a final pipeline is configured, it will always run regardless of the value // of this parameter. Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. + // RequireAlias If `true`, the request's actions must target an index alias. RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -164,8 +164,18 @@ func (s *CreateOperation) UnmarshalJSON(data []byte) error { // NewCreateOperation returns a CreateOperation. 
func NewCreateOperation() *CreateOperation { r := &CreateOperation{ - DynamicTemplates: make(map[string]string, 0), + DynamicTemplates: make(map[string]string), } return r } + +// true + +type CreateOperationVariant interface { + CreateOperationCaster() *CreateOperation +} + +func (s *CreateOperation) CreateOperationCaster() *CreateOperation { + return s +} diff --git a/typedapi/types/csvprocessor.go b/typedapi/types/csvprocessor.go index ed531e5cbd..920d8d2741 100644 --- a/typedapi/types/csvprocessor.go +++ b/typedapi/types/csvprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CsvProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L654-L687 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L695-L728 type CsvProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -44,7 +44,7 @@ type CsvProcessor struct { // Field The field to extract data from. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -103,16 +103,9 @@ func (s *CsvProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -224,3 +217,13 @@ func NewCsvProcessor() *CsvProcessor { return r } + +// true + +type CsvProcessorVariant interface { + CsvProcessorCaster() *CsvProcessor +} + +func (s *CsvProcessor) CsvProcessorCaster() *CsvProcessor { + return s +} diff --git a/typedapi/types/cumulativecardinalityaggregate.go b/typedapi/types/cumulativecardinalityaggregate.go index bb287d65d1..04c902dc4d 100644 --- a/typedapi/types/cumulativecardinalityaggregate.go +++ b/typedapi/types/cumulativecardinalityaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CumulativeCardinalityAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L856-L864 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L856-L864 type CumulativeCardinalityAggregate struct { Meta Metadata `json:"meta,omitempty"` Value int64 `json:"value"` @@ -96,3 +96,5 @@ func NewCumulativeCardinalityAggregate() *CumulativeCardinalityAggregate { return r } + +// false diff --git a/typedapi/types/cumulativecardinalityaggregation.go b/typedapi/types/cumulativecardinalityaggregation.go index d3d2301dc5..603a00d875 100644 --- a/typedapi/types/cumulativecardinalityaggregation.go +++ b/typedapi/types/cumulativecardinalityaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CumulativeCardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L206-L209 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L206-L209 type CumulativeCardinalityAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewCumulativeCardinalityAggregation() *CumulativeCardinalityAggregation { return r } + +// true + +type CumulativeCardinalityAggregationVariant interface { + CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation +} + +func (s *CumulativeCardinalityAggregation) CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation { + return s +} diff --git a/typedapi/types/cumulativesumaggregation.go b/typedapi/types/cumulativesumaggregation.go index c76ca3d070..711f54c4f0 100644 --- a/typedapi/types/cumulativesumaggregation.go +++ b/typedapi/types/cumulativesumaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CumulativeSumAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L211-L214 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L211-L214 type CumulativeSumAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewCumulativeSumAggregation() *CumulativeSumAggregation { return r } + +// true + +type CumulativeSumAggregationVariant interface { + CumulativeSumAggregationCaster() *CumulativeSumAggregation +} + +func (s *CumulativeSumAggregation) CumulativeSumAggregationCaster() *CumulativeSumAggregation { + return s +} diff --git a/typedapi/types/currentnode.go b/typedapi/types/currentnode.go index df91fcc841..7f0cf00d9d 100644 --- a/typedapi/types/currentnode.go +++ b/typedapi/types/currentnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // CurrentNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L79-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L79-L90 type CurrentNode struct { Attributes map[string]string `json:"attributes"` Id string `json:"id"` @@ -110,8 +110,10 @@ func (s *CurrentNode) UnmarshalJSON(data []byte) error { // NewCurrentNode returns a CurrentNode. func NewCurrentNode() *CurrentNode { r := &CurrentNode{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/customanalyzer.go b/typedapi/types/customanalyzer.go index de628c1b49..44b0005e87 100644 --- a/typedapi/types/customanalyzer.go +++ b/typedapi/types/customanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CustomAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L28-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L28-L35 type CustomAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -165,3 +165,13 @@ func NewCustomAnalyzer() *CustomAnalyzer { return r } + +// true + +type CustomAnalyzerVariant interface { + CustomAnalyzerCaster() *CustomAnalyzer +} + +func (s *CustomAnalyzer) CustomAnalyzerCaster() *CustomAnalyzer { + return s +} diff --git a/typedapi/types/customcategorizetextanalyzer.go b/typedapi/types/customcategorizetextanalyzer.go index e30cc91173..5354712289 100644 --- a/typedapi/types/customcategorizetextanalyzer.go +++ b/typedapi/types/customcategorizetextanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CustomCategorizeTextAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1189-L1193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1189-L1193 type CustomCategorizeTextAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -86,3 +86,13 @@ func NewCustomCategorizeTextAnalyzer() *CustomCategorizeTextAnalyzer { return r } + +// true + +type CustomCategorizeTextAnalyzerVariant interface { + CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer +} + +func (s *CustomCategorizeTextAnalyzer) CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer { + return s +} diff --git a/typedapi/types/customnormalizer.go b/typedapi/types/customnormalizer.go index 4f45fa14d8..e380d90066 100644 --- a/typedapi/types/customnormalizer.go +++ b/typedapi/types/customnormalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // CustomNormalizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/normalizers.ts#L30-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/normalizers.ts#L30-L34 type CustomNormalizer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -53,3 +53,13 @@ func NewCustomNormalizer() *CustomNormalizer { return r } + +// true + +type CustomNormalizerVariant interface { + CustomNormalizerCaster() *CustomNormalizer +} + +func (s *CustomNormalizer) CustomNormalizerCaster() *CustomNormalizer { + return s +} diff --git a/typedapi/types/customscheduling.go b/typedapi/types/customscheduling.go index f1b5cfe426..a5dd56f764 100644 --- a/typedapi/types/customscheduling.go +++ b/typedapi/types/customscheduling.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CustomScheduling type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L120-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L120-L126 type CustomScheduling struct { ConfigurationOverrides CustomSchedulingConfigurationOverrides `json:"configuration_overrides"` Enabled bool `json:"enabled"` @@ -114,3 +114,5 @@ func NewCustomScheduling() *CustomScheduling { return r } + +// false diff --git a/typedapi/types/customschedulingconfigurationoverrides.go b/typedapi/types/customschedulingconfigurationoverrides.go index 0be80f7520..eb677f53de 100644 --- a/typedapi/types/customschedulingconfigurationoverrides.go +++ b/typedapi/types/customschedulingconfigurationoverrides.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CustomSchedulingConfigurationOverrides type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L112-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L112-L118 type CustomSchedulingConfigurationOverrides struct { DomainAllowlist []string `json:"domain_allowlist,omitempty"` MaxCrawlDepth *int `json:"max_crawl_depth,omitempty"` @@ -111,3 +111,5 @@ func NewCustomSchedulingConfigurationOverrides() *CustomSchedulingConfigurationO return r } + +// false diff --git a/typedapi/types/czechanalyzer.go b/typedapi/types/czechanalyzer.go index 17bbab9a80..7882c3e56b 100644 --- a/typedapi/types/czechanalyzer.go +++ b/typedapi/types/czechanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // CzechAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L121-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L132-L137 type CzechAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewCzechAnalyzer() *CzechAnalyzer { return r } + +// true + +type CzechAnalyzerVariant interface { + CzechAnalyzerCaster() *CzechAnalyzer +} + +func (s *CzechAnalyzer) CzechAnalyzerCaster() *CzechAnalyzer { + return s +} diff --git a/typedapi/types/dailyschedule.go b/typedapi/types/dailyschedule.go index 293ec359e1..ddb6c252d6 100644 --- a/typedapi/types/dailyschedule.go +++ b/typedapi/types/dailyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DailySchedule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L33-L35 type DailySchedule struct { At []ScheduleTimeOfDay `json:"at"` } @@ -33,3 +33,13 @@ func NewDailySchedule() *DailySchedule { return r } + +// true + +type DailyScheduleVariant interface { + DailyScheduleCaster() *DailySchedule +} + +func (s *DailySchedule) DailyScheduleCaster() *DailySchedule { + return s +} diff --git a/typedapi/types/danglingindex.go b/typedapi/types/danglingindex.go index 9c198aa59b..d1c80ff6be 100644 --- a/typedapi/types/danglingindex.go +++ b/typedapi/types/danglingindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DanglingIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 type DanglingIndex struct { CreationDateMillis int64 `json:"creation_date_millis"` IndexName string `json:"index_name"` @@ -110,3 +110,5 @@ func NewDanglingIndex() *DanglingIndex { return r } + +// false diff --git a/typedapi/types/danishanalyzer.go b/typedapi/types/danishanalyzer.go index 271418dbc5..98d4bf724e 100644 --- a/typedapi/types/danishanalyzer.go +++ b/typedapi/types/danishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DanishAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L128-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L139-L143 type DanishAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewDanishAnalyzer() *DanishAnalyzer { return r } + +// true + +type DanishAnalyzerVariant interface { + DanishAnalyzerCaster() *DanishAnalyzer +} + +func (s *DanishAnalyzer) DanishAnalyzerCaster() *DanishAnalyzer { + return s +} diff --git a/typedapi/types/databaseconfiguration.go b/typedapi/types/databaseconfiguration.go index b8d3cd2beb..8cd45e6023 100644 --- a/typedapi/types/databaseconfiguration.go +++ b/typedapi/types/databaseconfiguration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,14 +30,11 @@ import ( // DatabaseConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Database.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L22-L37 type DatabaseConfiguration struct { - // Maxmind The configuration necessary to identify which IP geolocation provider to use - // to download the database, as well as any provider-specific configuration - // necessary for such downloading. 
- // At present, the only supported provider is maxmind, and the maxmind provider - // requires that an account_id (string) is configured. - Maxmind Maxmind `json:"maxmind"` + AdditionalDatabaseConfigurationProperty map[string]json.RawMessage `json:"-"` + Ipinfo *Ipinfo `json:"ipinfo,omitempty"` + Maxmind *Maxmind `json:"maxmind,omitempty"` // Name The provider-assigned name of the IP geolocation database to download. Name string `json:"name"` } @@ -57,6 +54,11 @@ func (s *DatabaseConfiguration) UnmarshalJSON(data []byte) error { switch t { + case "ipinfo": + if err := dec.Decode(&s.Ipinfo); err != nil { + return fmt.Errorf("%s | %w", "Ipinfo", err) + } + case "maxmind": if err := dec.Decode(&s.Maxmind); err != nil { return fmt.Errorf("%s | %w", "Maxmind", err) @@ -67,14 +69,68 @@ func (s *DatabaseConfiguration) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Name", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalDatabaseConfigurationProperty == nil { + s.AdditionalDatabaseConfigurationProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationProperty", err) + } + s.AdditionalDatabaseConfigurationProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DatabaseConfiguration) MarshalJSON() ([]byte, error) { + type opt DatabaseConfiguration + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDatabaseConfigurationProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, 
"AdditionalDatabaseConfigurationProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDatabaseConfiguration returns a DatabaseConfiguration. func NewDatabaseConfiguration() *DatabaseConfiguration { - r := &DatabaseConfiguration{} + r := &DatabaseConfiguration{ + AdditionalDatabaseConfigurationProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DatabaseConfigurationVariant interface { + DatabaseConfigurationCaster() *DatabaseConfiguration +} + +func (s *DatabaseConfiguration) DatabaseConfigurationCaster() *DatabaseConfiguration { + return s +} diff --git a/typedapi/types/databaseconfigurationfull.go b/typedapi/types/databaseconfigurationfull.go new file mode 100644 index 0000000000..efec709cbe --- /dev/null +++ b/typedapi/types/databaseconfigurationfull.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DatabaseConfigurationFull type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L39-L53 +type DatabaseConfigurationFull struct { + AdditionalDatabaseConfigurationFullProperty map[string]json.RawMessage `json:"-"` + Ipinfo *Ipinfo `json:"ipinfo,omitempty"` + Local *Local `json:"local,omitempty"` + Maxmind *Maxmind `json:"maxmind,omitempty"` + // Name The provider-assigned name of the IP geolocation database to download. + Name string `json:"name"` + Web *Web `json:"web,omitempty"` +} + +func (s *DatabaseConfigurationFull) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ipinfo": + if err := dec.Decode(&s.Ipinfo); err != nil { + return fmt.Errorf("%s | %w", "Ipinfo", err) + } + + case "local": + if err := dec.Decode(&s.Local); err != nil { + return fmt.Errorf("%s | %w", "Local", err) + } + + case "maxmind": + if err := dec.Decode(&s.Maxmind); err != nil { + return fmt.Errorf("%s | %w", "Maxmind", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "web": + if err := dec.Decode(&s.Web); err != nil { + return fmt.Errorf("%s | %w", "Web", err) + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalDatabaseConfigurationFullProperty == nil { + s.AdditionalDatabaseConfigurationFullProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationFullProperty", err) + } + s.AdditionalDatabaseConfigurationFullProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s DatabaseConfigurationFull) MarshalJSON() ([]byte, error) { + type opt 
DatabaseConfigurationFull + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDatabaseConfigurationFullProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDatabaseConfigurationFullProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDatabaseConfigurationFull returns a DatabaseConfigurationFull. +func NewDatabaseConfigurationFull() *DatabaseConfigurationFull { + r := &DatabaseConfigurationFull{ + AdditionalDatabaseConfigurationFullProperty: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/datacounts.go b/typedapi/types/datacounts.go index 0190f53d95..915a198f98 100644 --- a/typedapi/types/datacounts.go +++ b/typedapi/types/datacounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataCounts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L352-L372 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L352-L372 type DataCounts struct { BucketCount int64 `json:"bucket_count"` EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` @@ -355,3 +355,5 @@ func NewDataCounts() *DataCounts { return r } + +// false diff --git a/typedapi/types/datadescription.go b/typedapi/types/datadescription.go index 2c419be917..6f6371613e 100644 --- a/typedapi/types/datadescription.go +++ b/typedapi/types/datadescription.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataDescription type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L374-L390 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L374-L390 type DataDescription struct { FieldDelimiter *string `json:"field_delimiter,omitempty"` // Format Only JSON format is supported at this time. 
@@ -117,3 +117,13 @@ func NewDataDescription() *DataDescription { return r } + +// true + +type DataDescriptionVariant interface { + DataDescriptionCaster() *DataDescription +} + +func (s *DataDescription) DataDescriptionCaster() *DataDescription { + return s +} diff --git a/typedapi/types/dataemailattachment.go b/typedapi/types/dataemailattachment.go index a6f8c62568..2e761d9899 100644 --- a/typedapi/types/dataemailattachment.go +++ b/typedapi/types/dataemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // DataEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L234-L236 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L234-L236 type DataEmailAttachment struct { Format *dataattachmentformat.DataAttachmentFormat `json:"format,omitempty"` } @@ -37,3 +37,13 @@ func NewDataEmailAttachment() *DataEmailAttachment { return r } + +// true + +type DataEmailAttachmentVariant interface { + DataEmailAttachmentCaster() *DataEmailAttachment +} + +func (s *DataEmailAttachment) DataEmailAttachmentCaster() *DataEmailAttachment { + return s +} diff --git a/typedapi/types/datafeedauthorization.go b/typedapi/types/datafeedauthorization.go index 385c04fb04..c7db92c311 100644 --- a/typedapi/types/datafeedauthorization.go +++ b/typedapi/types/datafeedauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DatafeedAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Authorization.ts#L31-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Authorization.ts#L31-L43 type DatafeedAuthorization struct { // ApiKey If an API key was used for the most recent update to the datafeed, its name // and identifier are listed in the response. @@ -92,3 +92,5 @@ func NewDatafeedAuthorization() *DatafeedAuthorization { return r } + +// false diff --git a/typedapi/types/datafeedconfig.go b/typedapi/types/datafeedconfig.go index b3d079d6b5..358f5c8af1 100644 --- a/typedapi/types/datafeedconfig.go +++ b/typedapi/types/datafeedconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DatafeedConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L62-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L63-L120 type DatafeedConfig struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations // is limited and should be used only with low cardinality data. 
@@ -233,9 +233,19 @@ func (s *DatafeedConfig) UnmarshalJSON(data []byte) error { // NewDatafeedConfig returns a DatafeedConfig. func NewDatafeedConfig() *DatafeedConfig { r := &DatafeedConfig{ - Aggregations: make(map[string]Aggregations, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type DatafeedConfigVariant interface { + DatafeedConfigCaster() *DatafeedConfig +} + +func (s *DatafeedConfig) DatafeedConfigCaster() *DatafeedConfig { + return s +} diff --git a/typedapi/types/datafeedrunningstate.go b/typedapi/types/datafeedrunningstate.go index 8cebb7d2be..4fa296bd3c 100644 --- a/typedapi/types/datafeedrunningstate.go +++ b/typedapi/types/datafeedrunningstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DatafeedRunningState type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L200-L214 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L210-L224 type DatafeedRunningState struct { // RealTimeConfigured Indicates if the datafeed is "real-time"; meaning that the datafeed has no // configured `end` time. @@ -104,3 +104,5 @@ func NewDatafeedRunningState() *DatafeedRunningState { return r } + +// false diff --git a/typedapi/types/datafeeds.go b/typedapi/types/datafeeds.go index 7c26831359..55d495b462 100644 --- a/typedapi/types/datafeeds.go +++ b/typedapi/types/datafeeds.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Datafeeds type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/types.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/types.ts#L42-L44 type Datafeeds struct { ScrollSize int `json:"scroll_size"` } @@ -78,3 +78,5 @@ func NewDatafeeds() *Datafeeds { return r } + +// false diff --git a/typedapi/types/datafeedsrecord.go b/typedapi/types/datafeedsrecord.go index 8e0a5f46a1..6570ab3e55 100644 --- a/typedapi/types/datafeedsrecord.go +++ b/typedapi/types/datafeedsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DatafeedsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_datafeeds/types.ts#L22-L87 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_datafeeds/types.ts#L22-L87 type DatafeedsRecord struct { // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a // node. 
@@ -233,3 +233,5 @@ func NewDatafeedsRecord() *DatafeedsRecord { return r } + +// false diff --git a/typedapi/types/datafeedstats.go b/typedapi/types/datafeedstats.go index c4b1c91512..7578e7b874 100644 --- a/typedapi/types/datafeedstats.go +++ b/typedapi/types/datafeedstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DatafeedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L142-L171 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L143-L172 type DatafeedStats struct { // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a // node. @@ -45,7 +45,7 @@ type DatafeedStats struct { DatafeedId string `json:"datafeed_id"` // Node For started datafeeds only, this information pertains to the node upon which // the datafeed is started. - Node *DiscoveryNode `json:"node,omitempty"` + Node *DiscoveryNodeCompact `json:"node,omitempty"` // RunningState An object containing the running state for this datafeed. // It is only provided if the datafeed is started. RunningState *DatafeedRunningState `json:"running_state,omitempty"` @@ -54,7 +54,7 @@ type DatafeedStats struct { State datafeedstate.DatafeedState `json:"state"` // TimingStats An object that provides statistical information about timing aspect of this // datafeed. 
- TimingStats DatafeedTimingStats `json:"timing_stats"` + TimingStats *DatafeedTimingStats `json:"timing_stats,omitempty"` } func (s *DatafeedStats) UnmarshalJSON(data []byte) error { @@ -120,3 +120,5 @@ func NewDatafeedStats() *DatafeedStats { return r } + +// false diff --git a/typedapi/types/datafeedtimingstats.go b/typedapi/types/datafeedtimingstats.go index 9b8e611e92..5efd9c1c04 100644 --- a/typedapi/types/datafeedtimingstats.go +++ b/typedapi/types/datafeedtimingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,13 @@ import ( // DatafeedTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L173-L198 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L174-L202 type DatafeedTimingStats struct { // AverageSearchTimePerBucketMs The average search time per bucket, in milliseconds. AverageSearchTimePerBucketMs Float64 `json:"average_search_time_per_bucket_ms,omitempty"` // BucketCount The number of buckets processed. - BucketCount int64 `json:"bucket_count"` + BucketCount int64 `json:"bucket_count"` + ExponentialAverageCalculationContext *ExponentialAverageCalculationContext `json:"exponential_average_calculation_context,omitempty"` // ExponentialAverageSearchTimePerHourMs The exponential average search time per hour, in milliseconds. ExponentialAverageSearchTimePerHourMs Float64 `json:"exponential_average_search_time_per_hour_ms"` // JobId Identifier for the anomaly detection job. 
@@ -82,6 +83,11 @@ func (s *DatafeedTimingStats) UnmarshalJSON(data []byte) error { s.BucketCount = f } + case "exponential_average_calculation_context": + if err := dec.Decode(&s.ExponentialAverageCalculationContext); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageCalculationContext", err) + } + case "exponential_average_search_time_per_hour_ms": if err := dec.Decode(&s.ExponentialAverageSearchTimePerHourMs); err != nil { return fmt.Errorf("%s | %w", "ExponentialAverageSearchTimePerHourMs", err) @@ -123,3 +129,5 @@ func NewDatafeedTimingStats() *DatafeedTimingStats { return r } + +// false diff --git a/typedapi/types/dataframeanalysis.go b/typedapi/types/dataframeanalysis.go deleted file mode 100644 index 0c179770e2..0000000000 --- a/typedapi/types/dataframeanalysis.go +++ /dev/null @@ -1,425 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DataframeAnalysis type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L133-L212 -type DataframeAnalysis struct { - // Alpha Advanced configuration option. Machine learning uses loss guided tree - // growing, which means that the decision trees grow where the regularized loss - // decreases most quickly. This parameter affects loss calculations by acting as - // a multiplier of the tree depth. Higher alpha values result in shallower trees - // and faster training times. By default, this value is calculated during - // hyperparameter optimization. It must be greater than or equal to zero. - Alpha *Float64 `json:"alpha,omitempty"` - // DependentVariable Defines which field of the document is to be predicted. It must match one of - // the fields in the index being used to train. If this field is missing from a - // document, then that document will not be used for training, but a prediction - // with the trained model will be generated for it. It is also known as - // continuous target variable. - // For classification analysis, the data type of the field must be numeric - // (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or - // `boolean`. There must be no more than 30 different values in this field. - // For regression analysis, the data type of the field must be numeric. - DependentVariable string `json:"dependent_variable"` - // DownsampleFactor Advanced configuration option. Controls the fraction of data that is used to - // compute the derivatives of the loss function for tree training. A small value - // results in the use of a small fraction of the data. If this value is set to - // be less than 1, accuracy typically improves. However, too small a value may - // result in poor convergence for the ensemble and so require more trees. By - // default, this value is calculated during hyperparameter optimization. 
It must - // be greater than zero and less than or equal to 1. - DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` - // EarlyStoppingEnabled Advanced configuration option. Specifies whether the training process should - // finish if it is not finding any better performing models. If disabled, the - // training process can take significantly longer and the chance of finding a - // better performing model is unremarkable. - EarlyStoppingEnabled *bool `json:"early_stopping_enabled,omitempty"` - // Eta Advanced configuration option. The shrinkage applied to the weights. Smaller - // values result in larger forests which have a better generalization error. - // However, larger forests cause slower training. By default, this value is - // calculated during hyperparameter optimization. It must be a value between - // 0.001 and 1. - Eta *Float64 `json:"eta,omitempty"` - // EtaGrowthRatePerTree Advanced configuration option. Specifies the rate at which `eta` increases - // for each new tree that is added to the forest. For example, a rate of 1.05 - // increases `eta` by 5% for each extra tree. By default, this value is - // calculated during hyperparameter optimization. It must be between 0.5 and 2. - EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` - // FeatureBagFraction Advanced configuration option. Defines the fraction of features that will be - // used when selecting a random bag for each candidate split. By default, this - // value is calculated during hyperparameter optimization. - FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` - // FeatureProcessors Advanced configuration option. A collection of feature preprocessors that - // modify one or more included fields. The analysis uses the resulting one or - // more features instead of the original document field. However, these features - // are ephemeral; they are not stored in the destination index. 
Multiple - // `feature_processors` entries can refer to the same document fields. Automatic - // categorical feature encoding still occurs for the fields that are unprocessed - // by a custom processor or that have categorical values. Use this property only - // if you want to override the automatic feature encoding of the specified - // fields. - FeatureProcessors []DataframeAnalysisFeatureProcessor `json:"feature_processors,omitempty"` - // Gamma Advanced configuration option. Regularization parameter to prevent - // overfitting on the training data set. Multiplies a linear penalty associated - // with the size of individual trees in the forest. A high gamma value causes - // training to prefer small trees. A small gamma value results in larger - // individual trees and slower training. By default, this value is calculated - // during hyperparameter optimization. It must be a nonnegative value. - Gamma *Float64 `json:"gamma,omitempty"` - // Lambda Advanced configuration option. Regularization parameter to prevent - // overfitting on the training data set. Multiplies an L2 regularization term - // which applies to leaf weights of the individual trees in the forest. A high - // lambda value causes training to favor small leaf weights. This behavior makes - // the prediction function smoother at the expense of potentially not being able - // to capture relevant relationships between the features and the dependent - // variable. A small lambda value results in large individual trees and slower - // training. By default, this value is calculated during hyperparameter - // optimization. It must be a nonnegative value. - Lambda *Float64 `json:"lambda,omitempty"` - // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. A multiplier responsible for determining the - // maximum number of hyperparameter optimization steps in the Bayesian - // optimization procedure. 
The maximum number of steps is determined based on - // the number of undefined hyperparameters times the maximum optimization rounds - // per hyperparameter. By default, this value is calculated during - // hyperparameter optimization. - MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` - // MaxTrees Advanced configuration option. Defines the maximum number of decision trees - // in the forest. The maximum value is 2000. By default, this value is - // calculated during hyperparameter optimization. - MaxTrees *int `json:"max_trees,omitempty"` - // NumTopFeatureImportanceValues Advanced configuration option. Specifies the maximum number of feature - // importance values per document to return. By default, no feature importance - // calculation occurs. - NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` - // PredictionFieldName Defines the name of the prediction field in the results. Defaults to - // `_prediction`. - PredictionFieldName *string `json:"prediction_field_name,omitempty"` - // RandomizeSeed Defines the seed for the random generator that is used to pick training data. - // By default, it is randomly generated. Set it to a specific value to use the - // same training data each time you start a job (assuming other related - // parameters such as `source` and `analyzed_fields` are the same). - RandomizeSeed *Float64 `json:"randomize_seed,omitempty"` - // SoftTreeDepthLimit Advanced configuration option. Machine learning uses loss guided tree - // growing, which means that the decision trees grow where the regularized loss - // decreases most quickly. This soft limit combines with the - // `soft_tree_depth_tolerance` to penalize trees that exceed the specified - // depth; the regularized loss increases quickly beyond this depth. By default, - // this value is calculated during hyperparameter optimization. It must be - // greater than or equal to 0. 
- SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` - // SoftTreeDepthTolerance Advanced configuration option. This option controls how quickly the - // regularized loss increases when the tree depth exceeds - // `soft_tree_depth_limit`. By default, this value is calculated during - // hyperparameter optimization. It must be greater than or equal to 0.01. - SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` - // TrainingPercent Defines what percentage of the eligible documents that will be used for - // training. Documents that are ignored by the analysis (for example those that - // contain arrays with more than one value) won’t be included in the calculation - // for used percentage. - TrainingPercent Percentage `json:"training_percent,omitempty"` -} - -func (s *DataframeAnalysis) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "alpha": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Alpha", err) - } - f := Float64(value) - s.Alpha = &f - case float64: - f := Float64(v) - s.Alpha = &f - } - - case "dependent_variable": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "DependentVariable", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.DependentVariable = o - - case "downsample_factor": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DownsampleFactor", err) - } - f := Float64(value) - s.DownsampleFactor = &f - case float64: - f := Float64(v) - s.DownsampleFactor = &f - } - - case "early_stopping_enabled": - var 
tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "EarlyStoppingEnabled", err) - } - s.EarlyStoppingEnabled = &value - case bool: - s.EarlyStoppingEnabled = &v - } - - case "eta": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Eta", err) - } - f := Float64(value) - s.Eta = &f - case float64: - f := Float64(v) - s.Eta = &f - } - - case "eta_growth_rate_per_tree": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "EtaGrowthRatePerTree", err) - } - f := Float64(value) - s.EtaGrowthRatePerTree = &f - case float64: - f := Float64(v) - s.EtaGrowthRatePerTree = &f - } - - case "feature_bag_fraction": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "FeatureBagFraction", err) - } - f := Float64(value) - s.FeatureBagFraction = &f - case float64: - f := Float64(v) - s.FeatureBagFraction = &f - } - - case "feature_processors": - if err := dec.Decode(&s.FeatureProcessors); err != nil { - return fmt.Errorf("%s | %w", "FeatureProcessors", err) - } - - case "gamma": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gamma", err) - } - f := Float64(value) - s.Gamma = &f - case float64: - f := Float64(v) - s.Gamma = &f - } - - case "lambda": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Lambda", err) - } - f := Float64(value) - s.Lambda = &f - case float64: - f := Float64(v) - s.Lambda = &f - } - - 
case "max_optimization_rounds_per_hyperparameter": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "MaxOptimizationRoundsPerHyperparameter", err) - } - s.MaxOptimizationRoundsPerHyperparameter = &value - case float64: - f := int(v) - s.MaxOptimizationRoundsPerHyperparameter = &f - } - - case "max_trees", "maximum_number_trees": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "MaxTrees", err) - } - s.MaxTrees = &value - case float64: - f := int(v) - s.MaxTrees = &f - } - - case "num_top_feature_importance_values": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) - } - s.NumTopFeatureImportanceValues = &value - case float64: - f := int(v) - s.NumTopFeatureImportanceValues = &f - } - - case "prediction_field_name": - if err := dec.Decode(&s.PredictionFieldName); err != nil { - return fmt.Errorf("%s | %w", "PredictionFieldName", err) - } - - case "randomize_seed": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "RandomizeSeed", err) - } - f := Float64(value) - s.RandomizeSeed = &f - case float64: - f := Float64(v) - s.RandomizeSeed = &f - } - - case "soft_tree_depth_limit": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "SoftTreeDepthLimit", err) - } - s.SoftTreeDepthLimit = &value - case float64: - f := int(v) - s.SoftTreeDepthLimit = &f - } - - case "soft_tree_depth_tolerance": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := 
strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SoftTreeDepthTolerance", err) - } - f := Float64(value) - s.SoftTreeDepthTolerance = &f - case float64: - f := Float64(v) - s.SoftTreeDepthTolerance = &f - } - - case "training_percent": - if err := dec.Decode(&s.TrainingPercent); err != nil { - return fmt.Errorf("%s | %w", "TrainingPercent", err) - } - - } - } - return nil -} - -// NewDataframeAnalysis returns a DataframeAnalysis. -func NewDataframeAnalysis() *DataframeAnalysis { - r := &DataframeAnalysis{} - - return r -} diff --git a/typedapi/types/dataframeanalysisanalyzedfields.go b/typedapi/types/dataframeanalysisanalyzedfields.go index 72b2003cf1..8ea48b6655 100644 --- a/typedapi/types/dataframeanalysisanalyzedfields.go +++ b/typedapi/types/dataframeanalysisanalyzedfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,15 +30,15 @@ import ( // DataframeAnalysisAnalyzedFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L237-L243 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L238-L244 type DataframeAnalysisAnalyzedFields struct { // Excludes An array of strings that defines the fields that will be included in the // analysis. - Excludes []string `json:"excludes"` + Excludes []string `json:"excludes,omitempty"` // Includes An array of strings that defines the fields that will be excluded from the // analysis. 
You do not need to add fields with unsupported data types to // excludes, these fields are excluded from the analysis automatically. - Includes []string `json:"includes"` + Includes []string `json:"includes,omitempty"` } func (s *DataframeAnalysisAnalyzedFields) UnmarshalJSON(data []byte) error { @@ -87,3 +87,13 @@ func NewDataframeAnalysisAnalyzedFields() *DataframeAnalysisAnalyzedFields { return r } + +// true + +type DataframeAnalysisAnalyzedFieldsVariant interface { + DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields +} + +func (s *DataframeAnalysisAnalyzedFields) DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields { + return s +} diff --git a/typedapi/types/dataframeanalysisclassification.go b/typedapi/types/dataframeanalysisclassification.go index 7d9caee07e..8dc6512e32 100644 --- a/typedapi/types/dataframeanalysisclassification.go +++ b/typedapi/types/dataframeanalysisclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisClassification type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L226-L235 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L227-L236 type DataframeAnalysisClassification struct { // Alpha Advanced configuration option. 
Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -460,3 +460,13 @@ func NewDataframeAnalysisClassification() *DataframeAnalysisClassification { return r } + +// true + +type DataframeAnalysisClassificationVariant interface { + DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification +} + +func (s *DataframeAnalysisClassification) DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification { + return s +} diff --git a/typedapi/types/dataframeanalysiscontainer.go b/typedapi/types/dataframeanalysiscontainer.go index 8a6488e6a5..b1c127012d 100644 --- a/typedapi/types/dataframeanalysiscontainer.go +++ b/typedapi/types/dataframeanalysiscontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalysisContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L83-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L84-L101 type DataframeAnalysisContainer struct { + AdditionalDataframeAnalysisContainerProperty map[string]json.RawMessage `json:"-"` // Classification The configuration information necessary to perform classification. Classification *DataframeAnalysisClassification `json:"classification,omitempty"` // OutlierDetection The configuration information necessary to perform outlier detection. 
NOTE: @@ -40,9 +46,50 @@ type DataframeAnalysisContainer struct { Regression *DataframeAnalysisRegression `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DataframeAnalysisContainer) MarshalJSON() ([]byte, error) { + type opt DataframeAnalysisContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalysisContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalysisContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalysisContainer returns a DataframeAnalysisContainer. func NewDataframeAnalysisContainer() *DataframeAnalysisContainer { - r := &DataframeAnalysisContainer{} + r := &DataframeAnalysisContainer{ + AdditionalDataframeAnalysisContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeAnalysisContainerVariant interface { + DataframeAnalysisContainerCaster() *DataframeAnalysisContainer +} + +func (s *DataframeAnalysisContainer) DataframeAnalysisContainerCaster() *DataframeAnalysisContainer { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessor.go b/typedapi/types/dataframeanalysisfeatureprocessor.go index 8c9ca67e90..4e950f774c 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessor.go +++ b/typedapi/types/dataframeanalysisfeatureprocessor.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalysisFeatureProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L245-L257 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L246-L258 type DataframeAnalysisFeatureProcessor struct { + AdditionalDataframeAnalysisFeatureProcessorProperty map[string]json.RawMessage `json:"-"` // FrequencyEncoding The configuration information necessary to perform frequency encoding. FrequencyEncoding *DataframeAnalysisFeatureProcessorFrequencyEncoding `json:"frequency_encoding,omitempty"` // MultiEncoding The configuration information necessary to perform multi encoding. 
It allows @@ -41,9 +47,50 @@ type DataframeAnalysisFeatureProcessor struct { TargetMeanEncoding *DataframeAnalysisFeatureProcessorTargetMeanEncoding `json:"target_mean_encoding,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DataframeAnalysisFeatureProcessor) MarshalJSON() ([]byte, error) { + type opt DataframeAnalysisFeatureProcessor + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalysisFeatureProcessorProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalysisFeatureProcessorProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalysisFeatureProcessor returns a DataframeAnalysisFeatureProcessor. 
func NewDataframeAnalysisFeatureProcessor() *DataframeAnalysisFeatureProcessor { - r := &DataframeAnalysisFeatureProcessor{} + r := &DataframeAnalysisFeatureProcessor{ + AdditionalDataframeAnalysisFeatureProcessorProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeAnalysisFeatureProcessorVariant interface { + DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor +} + +func (s *DataframeAnalysisFeatureProcessor) DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go b/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go index 432b436174..063832e544 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeAnalysisFeatureProcessorFrequencyEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L259-L266 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L260-L267 type DataframeAnalysisFeatureProcessorFrequencyEncoding struct { // FeatureName The resulting feature name. 
FeatureName string `json:"feature_name"` @@ -81,8 +81,18 @@ func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) UnmarshalJSON(data // NewDataframeAnalysisFeatureProcessorFrequencyEncoding returns a DataframeAnalysisFeatureProcessorFrequencyEncoding. func NewDataframeAnalysisFeatureProcessorFrequencyEncoding() *DataframeAnalysisFeatureProcessorFrequencyEncoding { r := &DataframeAnalysisFeatureProcessorFrequencyEncoding{ - FrequencyMap: make(map[string]Float64, 0), + FrequencyMap: make(map[string]Float64), } return r } + +// true + +type DataframeAnalysisFeatureProcessorFrequencyEncodingVariant interface { + DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding +} + +func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go b/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go index 87ac539f18..d2c986df1b 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataframeAnalysisFeatureProcessorMultiEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L268-L271 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L269-L272 type DataframeAnalysisFeatureProcessorMultiEncoding struct { // Processors The ordered array of custom processors to execute. Must be more than 1. Processors []int `json:"processors"` @@ -34,3 +34,13 @@ func NewDataframeAnalysisFeatureProcessorMultiEncoding() *DataframeAnalysisFeatu return r } + +// true + +type DataframeAnalysisFeatureProcessorMultiEncodingVariant interface { + DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding +} + +func (s *DataframeAnalysisFeatureProcessorMultiEncoding) DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go b/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go index 09c50ceb4a..47062f57b8 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorNGramEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L273-L285 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L274-L286 type DataframeAnalysisFeatureProcessorNGramEncoding struct { Custom *bool `json:"custom,omitempty"` // FeaturePrefix The feature name prefix. Defaults to ngram__. @@ -143,3 +143,13 @@ func NewDataframeAnalysisFeatureProcessorNGramEncoding() *DataframeAnalysisFeatu return r } + +// true + +type DataframeAnalysisFeatureProcessorNGramEncodingVariant interface { + DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding +} + +func (s *DataframeAnalysisFeatureProcessorNGramEncoding) DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go b/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go index 6dd4f222ad..70c1eeb18b 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorOneHotEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L287-L292 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L288-L293 type DataframeAnalysisFeatureProcessorOneHotEncoding struct { // Field The name of the field to encode. Field string `json:"field"` @@ -82,3 +82,13 @@ func NewDataframeAnalysisFeatureProcessorOneHotEncoding() *DataframeAnalysisFeat return r } + +// true + +type DataframeAnalysisFeatureProcessorOneHotEncodingVariant interface { + DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding +} + +func (s *DataframeAnalysisFeatureProcessorOneHotEncoding) DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go b/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go index 3d0af96ee0..6c11098d21 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorTargetMeanEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L294-L303 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L295-L304 type DataframeAnalysisFeatureProcessorTargetMeanEncoding struct { // DefaultValue The default value if field value is not found in the target_map. DefaultValue int `json:"default_value"` @@ -100,8 +100,18 @@ func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) UnmarshalJSON(data // NewDataframeAnalysisFeatureProcessorTargetMeanEncoding returns a DataframeAnalysisFeatureProcessorTargetMeanEncoding. func NewDataframeAnalysisFeatureProcessorTargetMeanEncoding() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { r := &DataframeAnalysisFeatureProcessorTargetMeanEncoding{ - TargetMap: make(map[string]json.RawMessage, 0), + TargetMap: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeAnalysisFeatureProcessorTargetMeanEncodingVariant interface { + DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisoutlierdetection.go b/typedapi/types/dataframeanalysisoutlierdetection.go index d6d64e8d1d..653abf3f6d 100644 --- a/typedapi/types/dataframeanalysisoutlierdetection.go +++ b/typedapi/types/dataframeanalysisoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L102-L131 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L103-L132 type DataframeAnalysisOutlierDetection struct { // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` @@ -173,3 +173,13 @@ func NewDataframeAnalysisOutlierDetection() *DataframeAnalysisOutlierDetection { return r } + +// true + +type DataframeAnalysisOutlierDetectionVariant interface { + DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection +} + +func (s *DataframeAnalysisOutlierDetection) DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection { + return s +} diff --git a/typedapi/types/dataframeanalysisregression.go b/typedapi/types/dataframeanalysisregression.go index 3e36f35689..6c11b65b1c 100644 --- a/typedapi/types/dataframeanalysisregression.go +++ b/typedapi/types/dataframeanalysisregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisRegression type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L214-L224 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L215-L225 type DataframeAnalysisRegression struct { // Alpha Advanced configuration option. Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -457,3 +457,13 @@ func NewDataframeAnalysisRegression() *DataframeAnalysisRegression { return r } + +// true + +type DataframeAnalysisRegressionVariant interface { + DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression +} + +func (s *DataframeAnalysisRegression) DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression { + return s +} diff --git a/typedapi/types/dataframeanalytics.go b/typedapi/types/dataframeanalytics.go index 195548995b..d28ce286df 100644 --- a/typedapi/types/dataframeanalytics.go +++ b/typedapi/types/dataframeanalytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DataframeAnalytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L323-L343 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L325-L345 type DataframeAnalytics struct { // AnalysisStats An object containing information about the analysis job. 
AnalysisStats *DataframeAnalyticsStatsContainer `json:"analysis_stats,omitempty"` @@ -131,3 +131,5 @@ func NewDataframeAnalytics() *DataframeAnalytics { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsauthorization.go b/typedapi/types/dataframeanalyticsauthorization.go index 91b4a72d13..0ab8306309 100644 --- a/typedapi/types/dataframeanalyticsauthorization.go +++ b/typedapi/types/dataframeanalyticsauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Authorization.ts#L45-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Authorization.ts#L45-L57 type DataframeAnalyticsAuthorization struct { // ApiKey If an API key was used for the most recent update to the job, its name and // identifier are listed in the response. @@ -92,3 +92,5 @@ func NewDataframeAnalyticsAuthorization() *DataframeAnalyticsAuthorization { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsdestination.go b/typedapi/types/dataframeanalyticsdestination.go index b58417525c..99f2e23ac4 100644 --- a/typedapi/types/dataframeanalyticsdestination.go +++ b/typedapi/types/dataframeanalyticsdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L76-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L77-L82 type DataframeAnalyticsDestination struct { // Index Defines the destination index to store the results of the data frame // analytics job. @@ -76,3 +76,13 @@ func NewDataframeAnalyticsDestination() *DataframeAnalyticsDestination { return r } + +// true + +type DataframeAnalyticsDestinationVariant interface { + DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination +} + +func (s *DataframeAnalyticsDestination) DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination { + return s +} diff --git a/typedapi/types/dataframeanalyticsfieldselection.go b/typedapi/types/dataframeanalyticsfieldselection.go index ea9f8650a4..9967cde279 100644 --- a/typedapi/types/dataframeanalyticsfieldselection.go +++ b/typedapi/types/dataframeanalyticsfieldselection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsFieldSelection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L54-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L55-L68 type DataframeAnalyticsFieldSelection struct { // FeatureType The feature type of this field for the analysis. May be categorical or // numerical. @@ -136,3 +136,5 @@ func NewDataframeAnalyticsFieldSelection() *DataframeAnalyticsFieldSelection { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsmemoryestimation.go b/typedapi/types/dataframeanalyticsmemoryestimation.go index d204575346..436dd3270f 100644 --- a/typedapi/types/dataframeanalyticsmemoryestimation.go +++ b/typedapi/types/dataframeanalyticsmemoryestimation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsMemoryEstimation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L69-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L70-L75 type DataframeAnalyticsMemoryEstimation struct { // ExpectedMemoryWithDisk Estimated memory usage under the assumption that overflowing to disk is // allowed during data frame analytics. 
expected_memory_with_disk is usually @@ -93,3 +93,5 @@ func NewDataframeAnalyticsMemoryEstimation() *DataframeAnalyticsMemoryEstimation return r } + +// false diff --git a/typedapi/types/dataframeanalyticsrecord.go b/typedapi/types/dataframeanalyticsrecord.go index 1a233ea7fb..9e1ea05570 100644 --- a/typedapi/types/dataframeanalyticsrecord.go +++ b/typedapi/types/dataframeanalyticsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataFrameAnalyticsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 type DataFrameAnalyticsRecord struct { // AssignmentExplanation Messages related to the selection of a node. AssignmentExplanation *string `json:"assignment_explanation,omitempty"` @@ -237,3 +237,5 @@ func NewDataFrameAnalyticsRecord() *DataFrameAnalyticsRecord { return r } + +// false diff --git a/typedapi/types/dataframeanalyticssource.go b/typedapi/types/dataframeanalyticssource.go index cd811723ae..e91f2cb19a 100644 --- a/typedapi/types/dataframeanalyticssource.go +++ b/typedapi/types/dataframeanalyticssource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L38-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L39-L53 type DataframeAnalyticsSource struct { // Index Index or indices on which to perform the analysis. It can be a single index // or index pattern as well as an array of indices or patterns. NOTE: If your @@ -109,3 +109,13 @@ func NewDataframeAnalyticsSource() *DataframeAnalyticsSource { return r } + +// true + +type DataframeAnalyticsSourceVariant interface { + DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource +} + +func (s *DataframeAnalyticsSource) DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource { + return s +} diff --git a/typedapi/types/dataframeanalyticsstatscontainer.go b/typedapi/types/dataframeanalyticsstatscontainer.go index 2e91c843b3..1fc6ad664a 100644 --- a/typedapi/types/dataframeanalyticsstatscontainer.go +++ b/typedapi/types/dataframeanalyticsstatscontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalyticsStatsContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L372-L380 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L374-L382 type DataframeAnalyticsStatsContainer struct { + AdditionalDataframeAnalyticsStatsContainerProperty map[string]json.RawMessage `json:"-"` // ClassificationStats An object containing information about the classification analysis job. ClassificationStats *DataframeAnalyticsStatsHyperparameters `json:"classification_stats,omitempty"` // OutlierDetectionStats An object containing information about the outlier detection job. @@ -32,9 +38,42 @@ type DataframeAnalyticsStatsContainer struct { RegressionStats *DataframeAnalyticsStatsHyperparameters `json:"regression_stats,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DataframeAnalyticsStatsContainer) MarshalJSON() ([]byte, error) { + type opt DataframeAnalyticsStatsContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalyticsStatsContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalyticsStatsContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalyticsStatsContainer returns a DataframeAnalyticsStatsContainer. 
func NewDataframeAnalyticsStatsContainer() *DataframeAnalyticsStatsContainer { - r := &DataframeAnalyticsStatsContainer{} + r := &DataframeAnalyticsStatsContainer{ + AdditionalDataframeAnalyticsStatsContainerProperty: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsdatacounts.go b/typedapi/types/dataframeanalyticsstatsdatacounts.go index 09ed056a62..40aee20da4 100644 --- a/typedapi/types/dataframeanalyticsstatsdatacounts.go +++ b/typedapi/types/dataframeanalyticsstatsdatacounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsDataCounts type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L363-L370 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L365-L372 type DataframeAnalyticsStatsDataCounts struct { // SkippedDocsCount The number of documents that are skipped during the analysis because they // contained values that are not supported by the analysis. For example, outlier @@ -120,3 +120,5 @@ func NewDataframeAnalyticsStatsDataCounts() *DataframeAnalyticsStatsDataCounts { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatshyperparameters.go b/typedapi/types/dataframeanalyticsstatshyperparameters.go index 07d7cdbeb1..fa69a677c4 100644 --- a/typedapi/types/dataframeanalyticsstatshyperparameters.go +++ b/typedapi/types/dataframeanalyticsstatshyperparameters.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsHyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L382-L401 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L384-L403 type DataframeAnalyticsStatsHyperparameters struct { // Hyperparameters An object containing the parameters of the classification analysis job. Hyperparameters Hyperparameters `json:"hyperparameters"` @@ -108,3 +108,5 @@ func NewDataframeAnalyticsStatsHyperparameters() *DataframeAnalyticsStatsHyperpa return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsmemoryusage.go b/typedapi/types/dataframeanalyticsstatsmemoryusage.go index 799e78332c..1434e866dc 100644 --- a/typedapi/types/dataframeanalyticsstatsmemoryusage.go +++ b/typedapi/types/dataframeanalyticsstatsmemoryusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsMemoryUsage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L352-L361 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L354-L363 type DataframeAnalyticsStatsMemoryUsage struct { // MemoryReestimateBytes This value is present when the status is hard_limit and it is a new estimate // of how much memory the job needs. @@ -117,3 +117,5 @@ func NewDataframeAnalyticsStatsMemoryUsage() *DataframeAnalyticsStatsMemoryUsage return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsoutlierdetection.go b/typedapi/types/dataframeanalyticsstatsoutlierdetection.go index ff3c3be0f8..21d0adce0b 100644 --- a/typedapi/types/dataframeanalyticsstatsoutlierdetection.go +++ b/typedapi/types/dataframeanalyticsstatsoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsStatsOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L403-L416 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L405-L418 type DataframeAnalyticsStatsOutlierDetection struct { // Parameters The list of job parameters specified by the user or determined by algorithmic // heuristics. 
@@ -83,3 +83,5 @@ func NewDataframeAnalyticsStatsOutlierDetection() *DataframeAnalyticsStatsOutlie return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsprogress.go b/typedapi/types/dataframeanalyticsstatsprogress.go index 00cb0510e3..df4c58ec36 100644 --- a/typedapi/types/dataframeanalyticsstatsprogress.go +++ b/typedapi/types/dataframeanalyticsstatsprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L345-L350 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L347-L352 type DataframeAnalyticsStatsProgress struct { // Phase Defines the phase of the data frame analytics job. Phase string `json:"phase"` @@ -94,3 +94,5 @@ func NewDataframeAnalyticsStatsProgress() *DataframeAnalyticsStatsProgress { return r } + +// false diff --git a/typedapi/types/dataframeanalyticssummary.go b/typedapi/types/dataframeanalyticssummary.go index 4b4cabf489..5c1520cc2b 100644 --- a/typedapi/types/dataframeanalyticssummary.go +++ b/typedapi/types/dataframeanalyticssummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L305-L321 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L306-L323 type DataframeAnalyticsSummary struct { AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` Analysis DataframeAnalysisContainer `json:"analysis"` @@ -45,6 +45,7 @@ type DataframeAnalyticsSummary struct { Dest DataframeAnalyticsDestination `json:"dest"` Id string `json:"id"` MaxNumThreads *int `json:"max_num_threads,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` Source DataframeAnalyticsSource `json:"source"` Version *string `json:"version,omitempty"` @@ -137,6 +138,11 @@ func (s *DataframeAnalyticsSummary) UnmarshalJSON(data []byte) error { s.MaxNumThreads = &f } + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + case "model_memory_limit": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -170,3 +176,5 @@ func NewDataframeAnalyticsSummary() *DataframeAnalyticsSummary { return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummary.go b/typedapi/types/dataframeclassificationsummary.go index f14a4b0b26..6c9fd8b9dd 100644 --- a/typedapi/types/dataframeclassificationsummary.go +++ b/typedapi/types/dataframeclassificationsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataframeClassificationSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L44-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L44-L66 type DataframeClassificationSummary struct { // Accuracy Accuracy of predictions (per-class and overall). Accuracy *DataframeClassificationSummaryAccuracy `json:"accuracy,omitempty"` @@ -45,3 +45,5 @@ func NewDataframeClassificationSummary() *DataframeClassificationSummary { return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryaccuracy.go b/typedapi/types/dataframeclassificationsummaryaccuracy.go index 3f13fb2c85..e0384a208c 100644 --- a/typedapi/types/dataframeclassificationsummaryaccuracy.go +++ b/typedapi/types/dataframeclassificationsummaryaccuracy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryAccuracy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L111-L114 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L111-L114 type DataframeClassificationSummaryAccuracy struct { Classes []DataframeEvaluationClass `json:"classes"` OverallAccuracy Float64 `json:"overall_accuracy"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryAccuracy() *DataframeClassificationSummary return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go b/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go index 76a93aa513..fd7c4034e9 100644 --- a/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go +++ b/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryMulticlassConfusionMatrix type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L120-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L120-L123 type DataframeClassificationSummaryMulticlassConfusionMatrix struct { ConfusionMatrix []ConfusionMatrixItem `json:"confusion_matrix"` OtherActualClassCount int `json:"other_actual_class_count"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryMulticlassConfusionMatrix() *DataframeClas return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryprecision.go b/typedapi/types/dataframeclassificationsummaryprecision.go index 7fa079e73f..b61f172567 100644 --- a/typedapi/types/dataframeclassificationsummaryprecision.go +++ b/typedapi/types/dataframeclassificationsummaryprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryPrecision type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L101-L104 type DataframeClassificationSummaryPrecision struct { AvgPrecision Float64 `json:"avg_precision"` Classes []DataframeEvaluationClass `json:"classes"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryPrecision() *DataframeClassificationSummar return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryrecall.go b/typedapi/types/dataframeclassificationsummaryrecall.go index 5371739730..86bf5744ac 100644 --- a/typedapi/types/dataframeclassificationsummaryrecall.go +++ b/typedapi/types/dataframeclassificationsummaryrecall.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryRecall type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L106-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L106-L109 type DataframeClassificationSummaryRecall struct { AvgRecall Float64 `json:"avg_recall"` Classes []DataframeEvaluationClass `json:"classes"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryRecall() *DataframeClassificationSummaryRe return r } + +// false diff --git a/typedapi/types/dataframeevaluationclass.go b/typedapi/types/dataframeevaluationclass.go index 6e2dd3cf9c..53b990d3d1 100644 --- a/typedapi/types/dataframeevaluationclass.go +++ b/typedapi/types/dataframeevaluationclass.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationClass type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L116-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L116-L118 type DataframeEvaluationClass struct { ClassName string `json:"class_name"` Value Float64 `json:"value"` @@ -84,3 +84,5 @@ func NewDataframeEvaluationClass() *DataframeEvaluationClass { return r } + +// false diff --git a/typedapi/types/dataframeevaluationclassification.go b/typedapi/types/dataframeevaluationclassification.go index 0d31f90b06..f4b551ac5f 100644 --- a/typedapi/types/dataframeevaluationclassification.go +++ b/typedapi/types/dataframeevaluationclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationClassification type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L35-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L35-L44 type DataframeEvaluationClassification struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. 
If the data type is integer, the value has @@ -93,3 +93,13 @@ func NewDataframeEvaluationClassification() *DataframeEvaluationClassification { return r } + +// true + +type DataframeEvaluationClassificationVariant interface { + DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification +} + +func (s *DataframeEvaluationClassification) DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification { + return s +} diff --git a/typedapi/types/dataframeevaluationclassificationmetrics.go b/typedapi/types/dataframeevaluationclassificationmetrics.go index db705895f6..d908342381 100644 --- a/typedapi/types/dataframeevaluationclassificationmetrics.go +++ b/typedapi/types/dataframeevaluationclassificationmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationClassificationMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L73-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L73-L78 type DataframeEvaluationClassificationMetrics struct { // Accuracy Accuracy of predictions (per-class and overall). Accuracy map[string]json.RawMessage `json:"accuracy,omitempty"` @@ -45,11 +45,21 @@ type DataframeEvaluationClassificationMetrics struct { // NewDataframeEvaluationClassificationMetrics returns a DataframeEvaluationClassificationMetrics. 
func NewDataframeEvaluationClassificationMetrics() *DataframeEvaluationClassificationMetrics { r := &DataframeEvaluationClassificationMetrics{ - Accuracy: make(map[string]json.RawMessage, 0), - MulticlassConfusionMatrix: make(map[string]json.RawMessage, 0), - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), + Accuracy: make(map[string]json.RawMessage), + MulticlassConfusionMatrix: make(map[string]json.RawMessage), + Precision: make(map[string]json.RawMessage), + Recall: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationClassificationMetricsVariant interface { + DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics +} + +func (s *DataframeEvaluationClassificationMetrics) DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go b/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go index e1c71fab24..4ee88a9db0 100644 --- a/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go +++ b/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationClassificationMetricsAucRoc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L85-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L85-L90 type DataframeEvaluationClassificationMetricsAucRoc struct { // ClassName Name of the only class that is treated as positive during AUC ROC // calculation. Other classes are treated as negative ("one-vs-all" strategy). @@ -88,3 +88,13 @@ func NewDataframeEvaluationClassificationMetricsAucRoc() *DataframeEvaluationCla return r } + +// true + +type DataframeEvaluationClassificationMetricsAucRocVariant interface { + DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc +} + +func (s *DataframeEvaluationClassificationMetricsAucRoc) DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc { + return s +} diff --git a/typedapi/types/dataframeevaluationcontainer.go b/typedapi/types/dataframeevaluationcontainer.go index 80f98cd423..9c555ba7a1 100644 --- a/typedapi/types/dataframeevaluationcontainer.go +++ b/typedapi/types/dataframeevaluationcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // DataframeEvaluationContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L25-L33 type DataframeEvaluationContainer struct { + AdditionalDataframeEvaluationContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification evaluation evaluates the results of a classification analysis // which outputs a prediction that identifies to which of the classes each // document belongs. @@ -36,9 +42,50 @@ type DataframeEvaluationContainer struct { Regression *DataframeEvaluationRegression `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DataframeEvaluationContainer) MarshalJSON() ([]byte, error) { + type opt DataframeEvaluationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeEvaluationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeEvaluationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeEvaluationContainer returns a DataframeEvaluationContainer. 
func NewDataframeEvaluationContainer() *DataframeEvaluationContainer { - r := &DataframeEvaluationContainer{} + r := &DataframeEvaluationContainer{ + AdditionalDataframeEvaluationContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeEvaluationContainerVariant interface { + DataframeEvaluationContainerCaster() *DataframeEvaluationContainer +} + +func (s *DataframeEvaluationContainer) DataframeEvaluationContainerCaster() *DataframeEvaluationContainer { + return s +} diff --git a/typedapi/types/dataframeevaluationmetrics.go b/typedapi/types/dataframeevaluationmetrics.go deleted file mode 100644 index 5a34a3196b..0000000000 --- a/typedapi/types/dataframeevaluationmetrics.go +++ /dev/null @@ -1,49 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "encoding/json" -) - -// DataframeEvaluationMetrics type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L64-L71 -type DataframeEvaluationMetrics struct { - // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) - // score and optionally the curve. It is calculated for a specific class - // (provided as "class_name") treated as positive. - AucRoc *DataframeEvaluationClassificationMetricsAucRoc `json:"auc_roc,omitempty"` - // Precision Precision of predictions (per-class and average). - Precision map[string]json.RawMessage `json:"precision,omitempty"` - // Recall Recall of predictions (per-class and average). - Recall map[string]json.RawMessage `json:"recall,omitempty"` -} - -// NewDataframeEvaluationMetrics returns a DataframeEvaluationMetrics. -func NewDataframeEvaluationMetrics() *DataframeEvaluationMetrics { - r := &DataframeEvaluationMetrics{ - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), - } - - return r -} diff --git a/typedapi/types/dataframeevaluationoutlierdetection.go b/typedapi/types/dataframeevaluationoutlierdetection.go index 430c85fd05..dd5070f721 100644 --- a/typedapi/types/dataframeevaluationoutlierdetection.go +++ b/typedapi/types/dataframeevaluationoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationOutlierDetection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L46-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L46-L53 type DataframeEvaluationOutlierDetection struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. If the data type is integer, the value has @@ -85,3 +85,13 @@ func NewDataframeEvaluationOutlierDetection() *DataframeEvaluationOutlierDetecti return r } + +// true + +type DataframeEvaluationOutlierDetectionVariant interface { + DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection +} + +func (s *DataframeEvaluationOutlierDetection) DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection { + return s +} diff --git a/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go b/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go index cd85667b0c..7f898dba05 100644 --- a/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go +++ b/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationOutlierDetectionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L80-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L80-L83 type DataframeEvaluationOutlierDetectionMetrics struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. It is calculated for a specific class @@ -43,10 +43,20 @@ type DataframeEvaluationOutlierDetectionMetrics struct { // NewDataframeEvaluationOutlierDetectionMetrics returns a DataframeEvaluationOutlierDetectionMetrics. func NewDataframeEvaluationOutlierDetectionMetrics() *DataframeEvaluationOutlierDetectionMetrics { r := &DataframeEvaluationOutlierDetectionMetrics{ - ConfusionMatrix: make(map[string]json.RawMessage, 0), - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), + ConfusionMatrix: make(map[string]json.RawMessage), + Precision: make(map[string]json.RawMessage), + Recall: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationOutlierDetectionMetricsVariant interface { + DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics +} + +func (s *DataframeEvaluationOutlierDetectionMetrics) DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationregression.go b/typedapi/types/dataframeevaluationregression.go index b2877643b1..6cb97aee1f 100644 --- a/typedapi/types/dataframeevaluationregression.go +++ b/typedapi/types/dataframeevaluationregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L55-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L55-L62 type DataframeEvaluationRegression struct { // ActualField The field of the index which contains the ground truth. The data type of this // field must be numerical. @@ -85,3 +85,13 @@ func NewDataframeEvaluationRegression() *DataframeEvaluationRegression { return r } + +// true + +type DataframeEvaluationRegressionVariant interface { + DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression +} + +func (s *DataframeEvaluationRegression) DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetrics.go b/typedapi/types/dataframeevaluationregressionmetrics.go index 29af6fce2a..42aecec06c 100644 --- a/typedapi/types/dataframeevaluationregressionmetrics.go +++ b/typedapi/types/dataframeevaluationregressionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationRegressionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L92-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L92-L110 type DataframeEvaluationRegressionMetrics struct { // Huber Pseudo Huber loss function. Huber *DataframeEvaluationRegressionMetricsHuber `json:"huber,omitempty"` @@ -44,9 +44,19 @@ type DataframeEvaluationRegressionMetrics struct { // NewDataframeEvaluationRegressionMetrics returns a DataframeEvaluationRegressionMetrics. func NewDataframeEvaluationRegressionMetrics() *DataframeEvaluationRegressionMetrics { r := &DataframeEvaluationRegressionMetrics{ - Mse: make(map[string]json.RawMessage, 0), - RSquared: make(map[string]json.RawMessage, 0), + Mse: make(map[string]json.RawMessage), + RSquared: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationRegressionMetricsVariant interface { + DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics +} + +func (s *DataframeEvaluationRegressionMetrics) DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetricshuber.go b/typedapi/types/dataframeevaluationregressionmetricshuber.go index b93f7c9699..640237b2b6 100644 --- a/typedapi/types/dataframeevaluationregressionmetricshuber.go +++ b/typedapi/types/dataframeevaluationregressionmetricshuber.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationRegressionMetricsHuber type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L117-L120 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L117-L120 type DataframeEvaluationRegressionMetricsHuber struct { // Delta Approximates 1/2 (prediction - actual)2 for values much less than delta and // approximates a straight line with slope delta for values much larger than @@ -81,3 +81,13 @@ func NewDataframeEvaluationRegressionMetricsHuber() *DataframeEvaluationRegressi return r } + +// true + +type DataframeEvaluationRegressionMetricsHuberVariant interface { + DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber +} + +func (s *DataframeEvaluationRegressionMetricsHuber) DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetricsmsle.go b/typedapi/types/dataframeevaluationregressionmetricsmsle.go index 11e99bda52..ca73023b66 100644 --- a/typedapi/types/dataframeevaluationregressionmetricsmsle.go +++ b/typedapi/types/dataframeevaluationregressionmetricsmsle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationRegressionMetricsMsle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeEvaluation.ts#L112-L115 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeEvaluation.ts#L112-L115 type DataframeEvaluationRegressionMetricsMsle struct { // Offset Defines the transition point at which you switch from minimizing quadratic // error to minimizing quadratic log error. Defaults to 1. @@ -80,3 +80,13 @@ func NewDataframeEvaluationRegressionMetricsMsle() *DataframeEvaluationRegressio return r } + +// true + +type DataframeEvaluationRegressionMetricsMsleVariant interface { + DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle +} + +func (s *DataframeEvaluationRegressionMetricsMsle) DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle { + return s +} diff --git a/typedapi/types/dataframeevaluationsummaryaucroc.go b/typedapi/types/dataframeevaluationsummaryaucroc.go index b45344e08f..0c1bda9dde 100644 --- a/typedapi/types/dataframeevaluationsummaryaucroc.go +++ b/typedapi/types/dataframeevaluationsummaryaucroc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationSummaryAucRoc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L91-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L91-L93 type DataframeEvaluationSummaryAucRoc struct { Curve []DataframeEvaluationSummaryAucRocCurveItem `json:"curve,omitempty"` Value Float64 `json:"value"` @@ -84,3 +84,5 @@ func NewDataframeEvaluationSummaryAucRoc() *DataframeEvaluationSummaryAucRoc { return r } + +// false diff --git a/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go b/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go index b31b28b461..a1439f8382 100644 --- a/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go +++ b/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationSummaryAucRocCurveItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L95-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L95-L99 type DataframeEvaluationSummaryAucRocCurveItem struct { Fpr Float64 `json:"fpr"` Threshold Float64 `json:"threshold"` @@ -112,3 +112,5 @@ func NewDataframeEvaluationSummaryAucRocCurveItem() *DataframeEvaluationSummaryA return r } + +// false diff --git a/typedapi/types/dataframeevaluationvalue.go b/typedapi/types/dataframeevaluationvalue.go index 7bb8d0922a..1d16f05e63 100644 --- a/typedapi/types/dataframeevaluationvalue.go +++ b/typedapi/types/dataframeevaluationvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L87-L89 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L87-L89 type DataframeEvaluationValue struct { Value Float64 `json:"value"` } @@ -78,3 +78,5 @@ func NewDataframeEvaluationValue() *DataframeEvaluationValue { return r } + +// false diff --git a/typedapi/types/dataframeoutlierdetectionsummary.go b/typedapi/types/dataframeoutlierdetectionsummary.go index 62b2d75d15..c940704798 100644 --- a/typedapi/types/dataframeoutlierdetectionsummary.go +++ b/typedapi/types/dataframeoutlierdetectionsummary.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataframeOutlierDetectionSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L24-L42 type DataframeOutlierDetectionSummary struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. @@ -42,10 +42,12 @@ type DataframeOutlierDetectionSummary struct { // NewDataframeOutlierDetectionSummary returns a DataframeOutlierDetectionSummary. func NewDataframeOutlierDetectionSummary() *DataframeOutlierDetectionSummary { r := &DataframeOutlierDetectionSummary{ - ConfusionMatrix: make(map[string]ConfusionMatrixThreshold, 0), - Precision: make(map[string]Float64, 0), - Recall: make(map[string]Float64, 0), + ConfusionMatrix: make(map[string]ConfusionMatrixThreshold), + Precision: make(map[string]Float64), + Recall: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/dataframepreviewconfig.go b/typedapi/types/dataframepreviewconfig.go index 538b7e8930..ba1df9bff0 100644 --- a/typedapi/types/dataframepreviewconfig.go +++ b/typedapi/types/dataframepreviewconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataframePreviewConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 type DataframePreviewConfig struct { Analysis DataframeAnalysisContainer `json:"analysis"` AnalyzedFields *DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` @@ -109,3 +109,13 @@ func NewDataframePreviewConfig() *DataframePreviewConfig { return r } + +// true + +type DataframePreviewConfigVariant interface { + DataframePreviewConfigCaster() *DataframePreviewConfig +} + +func (s *DataframePreviewConfig) DataframePreviewConfigCaster() *DataframePreviewConfig { + return s +} diff --git a/typedapi/types/dataframeregressionsummary.go b/typedapi/types/dataframeregressionsummary.go index ec494dd412..eeb5028aa8 100644 --- a/typedapi/types/dataframeregressionsummary.go +++ b/typedapi/types/dataframeregressionsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataframeRegressionSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/evaluate_data_frame/types.ts#L68-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/evaluate_data_frame/types.ts#L68-L85 type DataframeRegressionSummary struct { // Huber Pseudo Huber loss function. Huber *DataframeEvaluationValue `json:"huber,omitempty"` @@ -43,3 +43,5 @@ func NewDataframeRegressionSummary() *DataframeRegressionSummary { return r } + +// false diff --git a/typedapi/types/datapathstats.go b/typedapi/types/datapathstats.go index 72cc5036b9..6ecd7401bf 100644 --- a/typedapi/types/datapathstats.go +++ b/typedapi/types/datapathstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataPathStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L586-L630 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L586-L630 type DataPathStats struct { // Available Total amount of disk space available to this Java virtual machine on this // file store. @@ -301,3 +301,5 @@ func NewDataPathStats() *DataPathStats { return r } + +// false diff --git a/typedapi/types/datastream.go b/typedapi/types/datastream.go index 9934d85125..324ea70190 100644 --- a/typedapi/types/datastream.go +++ b/typedapi/types/datastream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // DataStream type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L45-L127 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L45-L127 type DataStream struct { // AllowCustomRouting If `true`, the data stream allows custom routing on write request. AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` @@ -271,3 +271,5 @@ func NewDataStream() *DataStream { return r } + +// false diff --git a/typedapi/types/datastreamindex.go b/typedapi/types/datastreamindex.go index 5c946a84ee..eace45aacd 100644 --- a/typedapi/types/datastreamindex.go +++ b/typedapi/types/datastreamindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DataStreamIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L136-L157 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L136-L157 type DataStreamIndex struct { // IlmPolicy Name of the current ILM lifecycle policy configured for this backing index. 
IlmPolicy *string `json:"ilm_policy,omitempty"` @@ -108,3 +108,5 @@ func NewDataStreamIndex() *DataStreamIndex { return r } + +// false diff --git a/typedapi/types/datastreamlifecycle.go b/typedapi/types/datastreamlifecycle.go index 36b309aa1f..94585c468f 100644 --- a/typedapi/types/datastreamlifecycle.go +++ b/typedapi/types/datastreamlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStreamLifecycle.ts#L25-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStreamLifecycle.ts#L25-L45 type DataStreamLifecycle struct { // DataRetention If defined, every document added to this data stream will be stored at least // for this time frame. @@ -97,3 +97,13 @@ func NewDataStreamLifecycle() *DataStreamLifecycle { return r } + +// true + +type DataStreamLifecycleVariant interface { + DataStreamLifecycleCaster() *DataStreamLifecycle +} + +func (s *DataStreamLifecycle) DataStreamLifecycleCaster() *DataStreamLifecycle { + return s +} diff --git a/typedapi/types/datastreamlifecycledetails.go b/typedapi/types/datastreamlifecycledetails.go index 1d419f2940..b9a0dd1724 100644 --- a/typedapi/types/datastreamlifecycledetails.go +++ b/typedapi/types/datastreamlifecycledetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L152-L156 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L153-L157 type DataStreamLifecycleDetails struct { StagnatingBackingIndices []StagnatingBackingIndices `json:"stagnating_backing_indices,omitempty"` StagnatingBackingIndicesCount int `json:"stagnating_backing_indices_count"` @@ -101,3 +101,5 @@ func NewDataStreamLifecycleDetails() *DataStreamLifecycleDetails { return r } + +// false diff --git a/typedapi/types/datastreamlifecycledownsampling.go b/typedapi/types/datastreamlifecycledownsampling.go index dccc42e5c9..614a35f6f2 100644 --- a/typedapi/types/datastreamlifecycledownsampling.go +++ b/typedapi/types/datastreamlifecycledownsampling.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataStreamLifecycleDownsampling type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 type DataStreamLifecycleDownsampling struct { // Rounds The list of downsampling rounds to execute as part of this downsampling // configuration @@ -35,3 +35,13 @@ func NewDataStreamLifecycleDownsampling() *DataStreamLifecycleDownsampling { return r } + +// true + +type DataStreamLifecycleDownsamplingVariant interface { + DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling +} + +func (s *DataStreamLifecycleDownsampling) DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling { + return s +} diff --git a/typedapi/types/datastreamlifecycleexplain.go b/typedapi/types/datastreamlifecycleexplain.go index 7d2f559d0f..09debb23b6 100644 --- a/typedapi/types/datastreamlifecycleexplain.go +++ b/typedapi/types/datastreamlifecycleexplain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleExplain type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 type DataStreamLifecycleExplain struct { Error *string `json:"error,omitempty"` GenerationTime Duration `json:"generation_time,omitempty"` @@ -131,3 +131,5 @@ func NewDataStreamLifecycleExplain() *DataStreamLifecycleExplain { return r } + +// false diff --git a/typedapi/types/datastreamlifecycleindicator.go b/typedapi/types/datastreamlifecycleindicator.go index af817fd668..1df41e107e 100644 --- a/typedapi/types/datastreamlifecycleindicator.go +++ b/typedapi/types/datastreamlifecycleindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DataStreamLifecycleIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L147-L151 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L148-L152 type DataStreamLifecycleIndicator struct { Details *DataStreamLifecycleDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewDataStreamLifecycleIndicator() *DataStreamLifecycleIndicator { return r } + +// false diff --git a/typedapi/types/datastreamlifecyclerolloverconditions.go b/typedapi/types/datastreamlifecyclerolloverconditions.go index 0e539f6adc..f7fe2711fe 100644 --- a/typedapi/types/datastreamlifecyclerolloverconditions.go +++ b/typedapi/types/datastreamlifecyclerolloverconditions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleRolloverConditions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStreamLifecycle.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStreamLifecycle.ts#L60-L72 type DataStreamLifecycleRolloverConditions struct { MaxAge *string `json:"max_age,omitempty"` MaxDocs *int64 `json:"max_docs,omitempty"` @@ -168,3 +168,13 @@ func NewDataStreamLifecycleRolloverConditions() *DataStreamLifecycleRolloverCond return r } + +// true + +type DataStreamLifecycleRolloverConditionsVariant interface { + DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions +} + +func (s *DataStreamLifecycleRolloverConditions) DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions { + return s +} diff --git a/typedapi/types/datastreamlifecyclewithrollover.go b/typedapi/types/datastreamlifecyclewithrollover.go index ca2e30ec3c..19dde04783 100644 --- a/typedapi/types/datastreamlifecyclewithrollover.go +++ b/typedapi/types/datastreamlifecyclewithrollover.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleWithRollover type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStreamLifecycle.ts#L47-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStreamLifecycle.ts#L47-L58 type DataStreamLifecycleWithRollover struct { // DataRetention If defined, every document added to this data stream will be stored at least // for this time frame. @@ -108,3 +108,13 @@ func NewDataStreamLifecycleWithRollover() *DataStreamLifecycleWithRollover { return r } + +// true + +type DataStreamLifecycleWithRolloverVariant interface { + DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover +} + +func (s *DataStreamLifecycleWithRollover) DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover { + return s +} diff --git a/typedapi/types/datastreamnames.go b/typedapi/types/datastreamnames.go index 8d0b2ea420..7d674bbfbf 100644 --- a/typedapi/types/datastreamnames.go +++ b/typedapi/types/datastreamnames.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DataStreamNames type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L95-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L88-L88 type DataStreamNames []string diff --git a/typedapi/types/datastreams.go b/typedapi/types/datastreams.go index 1a2d747935..f829234061 100644 --- a/typedapi/types/datastreams.go +++ b/typedapi/types/datastreams.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreams type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L79-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L78-L81 type DataStreams struct { Available bool `json:"available"` DataStreams int64 `json:"data_streams"` @@ -123,3 +123,5 @@ func NewDataStreams() *DataStreams { return r } + +// false diff --git a/typedapi/types/datastreamsstatsitem.go b/typedapi/types/datastreamsstatsitem.go index d9c7ddc48e..40dde539b3 100644 --- a/typedapi/types/datastreamsstatsitem.go +++ b/typedapi/types/datastreamsstatsitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamsStatsItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 type DataStreamsStatsItem struct { // BackingIndices Current number of backing indices for the data stream. 
BackingIndices int `json:"backing_indices"` @@ -124,3 +124,5 @@ func NewDataStreamsStatsItem() *DataStreamsStatsItem { return r } + +// false diff --git a/typedapi/types/rankevalmetricratingtreshold.go b/typedapi/types/datastreamstats.go similarity index 52% rename from typedapi/types/rankevalmetricratingtreshold.go rename to typedapi/types/datastreamstats.go index 406c6a10a6..fe15a47a57 100644 --- a/typedapi/types/rankevalmetricratingtreshold.go +++ b/typedapi/types/datastreamstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,19 +29,20 @@ import ( "strconv" ) -// RankEvalMetricRatingTreshold type. +// DataStreamStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L34-L40 -type RankEvalMetricRatingTreshold struct { - // K Sets the maximum number of documents retrieved per query. This value will act - // in place of the usual size parameter in the query. - K *int `json:"k,omitempty"` - // RelevantRatingThreshold Sets the rating threshold above which documents are considered to be - // "relevant". - RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L46-L59 +type DataStreamStats struct { + // BackingIndicesInError The count of the backing indices for the data stream that have encountered an + // error. + BackingIndicesInError int `json:"backing_indices_in_error"` + // BackingIndicesInTotal The count of the backing indices for the data stream.
+ BackingIndicesInTotal int `json:"backing_indices_in_total"` + // Name The name of the data stream. + Name string `json:"name"` } -func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { +func (s *DataStreamStats) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -56,7 +57,7 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { switch t { - case "k": + case "backing_indices_in_error": var tmp any dec.Decode(&tmp) @@ -64,15 +65,15 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { case string: value, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("%s | %w", "K", err) + return fmt.Errorf("%s | %w", "BackingIndicesInError", err) } - s.K = &value + s.BackingIndicesInError = value case float64: f := int(v) - s.K = &f + s.BackingIndicesInError = f } - case "relevant_rating_threshold": + case "backing_indices_in_total": var tmp any dec.Decode(&tmp) @@ -80,12 +81,17 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { case string: value, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("%s | %w", "RelevantRatingThreshold", err) + return fmt.Errorf("%s | %w", "BackingIndicesInTotal", err) } - s.RelevantRatingThreshold = &value + s.BackingIndicesInTotal = value case float64: f := int(v) - s.RelevantRatingThreshold = &f + s.BackingIndicesInTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) } } @@ -93,9 +99,11 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { return nil } -// NewRankEvalMetricRatingTreshold returns a RankEvalMetricRatingTreshold. -func NewRankEvalMetricRatingTreshold() *RankEvalMetricRatingTreshold { - r := &RankEvalMetricRatingTreshold{} +// NewDataStreamStats returns a DataStreamStats. 
+func NewDataStreamStats() *DataStreamStats { + r := &DataStreamStats{} return r } + +// false diff --git a/typedapi/types/datastreamtimestamp.go b/typedapi/types/datastreamtimestamp.go index 6b10e55af8..88adf88698 100644 --- a/typedapi/types/datastreamtimestamp.go +++ b/typedapi/types/datastreamtimestamp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamTimestamp type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/TypeMapping.ts#L59-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/TypeMapping.ts#L59-L61 type DataStreamTimestamp struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewDataStreamTimestamp() *DataStreamTimestamp { return r } + +// true + +type DataStreamTimestampVariant interface { + DataStreamTimestampCaster() *DataStreamTimestamp +} + +func (s *DataStreamTimestamp) DataStreamTimestampCaster() *DataStreamTimestamp { + return s +} diff --git a/typedapi/types/datastreamtimestampfield.go b/typedapi/types/datastreamtimestampfield.go index 11050abd0f..122f817af0 100644 --- a/typedapi/types/datastreamtimestampfield.go +++ b/typedapi/types/datastreamtimestampfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataStreamTimestampField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L129-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L129-L134 type DataStreamTimestampField struct { // Name Name of the timestamp field for the data stream, which must be `@timestamp`. // The `@timestamp` field must be included in every document indexed to the data @@ -69,3 +69,5 @@ func NewDataStreamTimestampField() *DataStreamTimestampField { return r } + +// false diff --git a/typedapi/types/datastreamvisibility.go b/typedapi/types/datastreamvisibility.go index 07dfeb385f..54027f6cc9 100644 --- a/typedapi/types/datastreamvisibility.go +++ b/typedapi/types/datastreamvisibility.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataStreamVisibility type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L159-L162 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L159-L162 type DataStreamVisibility struct { AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` Hidden *bool `json:"hidden,omitempty"` @@ -91,3 +91,13 @@ func NewDataStreamVisibility() *DataStreamVisibility { return r } + +// true + +type DataStreamVisibilityVariant interface { + DataStreamVisibilityCaster() *DataStreamVisibility +} + +func (s *DataStreamVisibility) DataStreamVisibilityCaster() *DataStreamVisibility { + return s +} diff --git a/typedapi/types/datastreamwithlifecycle.go b/typedapi/types/datastreamwithlifecycle.go index b610543c1f..dd8611aefc 100644 --- a/typedapi/types/datastreamwithlifecycle.go +++ b/typedapi/types/datastreamwithlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DataStreamWithLifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 type DataStreamWithLifecycle struct { Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewDataStreamWithLifecycle() *DataStreamWithLifecycle { return r } + +// false diff --git a/typedapi/types/datatierphasestatistics.go b/typedapi/types/datatierphasestatistics.go index 6e2cdbd429..a1d31360f7 100644 --- a/typedapi/types/datatierphasestatistics.go +++ b/typedapi/types/datatierphasestatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataTierPhaseStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L84-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L83-L94 type DataTierPhaseStatistics struct { DocCount int64 `json:"doc_count"` IndexCount int64 `json:"index_count"` @@ -221,3 +221,5 @@ func NewDataTierPhaseStatistics() *DataTierPhaseStatistics { return r } + +// false diff --git a/typedapi/types/datatiers.go b/typedapi/types/datatiers.go index 5e7fdcda74..5d107707aa 100644 --- a/typedapi/types/datatiers.go +++ b/typedapi/types/datatiers.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DataTiers type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L337-L347 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L349-L359 type DataTiers struct { Available bool `json:"available"` DataCold DataTierPhaseStatistics `json:"data_cold"` @@ -121,3 +121,5 @@ func NewDataTiers() *DataTiers { return r } + +// false diff --git a/typedapi/types/datedecayfunction.go b/typedapi/types/datedecayfunction.go index a5e4a5cc9e..1d2dd1f432 100644 --- a/typedapi/types/datedecayfunction.go +++ b/typedapi/types/datedecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // DateDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L209-L209 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L209-L209 type DateDecayFunction struct { DecayFunctionBaseDateMathDuration map[string]DecayPlacementDateMathDuration `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s DateDecayFunction) MarshalJSON() ([]byte, error) { // NewDateDecayFunction returns a DateDecayFunction. func NewDateDecayFunction() *DateDecayFunction { r := &DateDecayFunction{ - DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration, 0), + DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration), } return r } + +// true + +type DateDecayFunctionVariant interface { + DateDecayFunctionCaster() *DateDecayFunction +} + +func (s *DateDecayFunction) DateDecayFunctionCaster() *DateDecayFunction { + return s +} diff --git a/typedapi/types/datedistancefeaturequery.go b/typedapi/types/datedistancefeaturequery.go index 7619c32977..28c19d414d 100644 --- a/typedapi/types/datedistancefeaturequery.go +++ b/typedapi/types/datedistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DateDistanceFeatureQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L71-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L71-L74 type DateDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -131,3 +131,13 @@ func NewDateDistanceFeatureQuery() *DateDistanceFeatureQuery { return r } + +// true + +type DateDistanceFeatureQueryVariant interface { + DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery +} + +func (s *DateDistanceFeatureQuery) DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/datehistogramaggregate.go b/typedapi/types/datehistogramaggregate.go index 3955d85a60..5becf8b318 100644 --- a/typedapi/types/datehistogramaggregate.go +++ b/typedapi/types/datehistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DateHistogramAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L383-L386 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L383-L386 type DateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewDateHistogramAggregate() *DateHistogramAggregate { return r } + +// false diff --git a/typedapi/types/datehistogramaggregation.go b/typedapi/types/datehistogramaggregation.go index 3bb0376ee6..3e6c4fa97b 100644 --- a/typedapi/types/datehistogramaggregation.go +++ b/typedapi/types/datehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // DateHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L202-L260 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L202-L260 type DateHistogramAggregation struct { // CalendarInterval Calendar-aware interval. // Can be specified using the unit name, such as `month`, or as a single unit @@ -220,8 +220,18 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { // NewDateHistogramAggregation returns a DateHistogramAggregation. 
func NewDateHistogramAggregation() *DateHistogramAggregation { r := &DateHistogramAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type DateHistogramAggregationVariant interface { + DateHistogramAggregationCaster() *DateHistogramAggregation +} + +func (s *DateHistogramAggregation) DateHistogramAggregationCaster() *DateHistogramAggregation { + return s +} diff --git a/typedapi/types/datehistogrambucket.go b/typedapi/types/datehistogrambucket.go index 3da282e079..064fe61504 100644 --- a/typedapi/types/datehistogrambucket.go +++ b/typedapi/types/datehistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // DateHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L388-L391 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L388-L391 type DateHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -644,8 +644,10 @@ func (s DateHistogramBucket) MarshalJSON() ([]byte, error) { // NewDateHistogramBucket returns a DateHistogramBucket. 
func NewDateHistogramBucket() *DateHistogramBucket { r := &DateHistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/datehistogramgrouping.go b/typedapi/types/datehistogramgrouping.go index 162c7d45c2..37acb04621 100644 --- a/typedapi/types/datehistogramgrouping.go +++ b/typedapi/types/datehistogramgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DateHistogramGrouping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Groupings.ts#L42-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Groupings.ts#L42-L73 type DateHistogramGrouping struct { // CalendarInterval The interval of time buckets to be generated when rolling up. CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -124,3 +124,13 @@ func NewDateHistogramGrouping() *DateHistogramGrouping { return r } + +// true + +type DateHistogramGroupingVariant interface { + DateHistogramGroupingCaster() *DateHistogramGrouping +} + +func (s *DateHistogramGrouping) DateHistogramGroupingCaster() *DateHistogramGrouping { + return s +} diff --git a/typedapi/types/dateindexnameprocessor.go b/typedapi/types/dateindexnameprocessor.go index d928bd9ff6..2b71f63049 100644 --- a/typedapi/types/dateindexnameprocessor.go +++ b/typedapi/types/dateindexnameprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,13 @@ import ( // DateIndexNameProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L689-L727 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L730-L768 type DateIndexNameProcessor struct { // DateFormats An array of the expected date formats for parsing dates / timestamps in the // document being preprocessed. // Can be a java time pattern or one of the following formats: ISO8601, UNIX, // UNIX_MS, or TAI64N. - DateFormats []string `json:"date_formats"` + DateFormats []string `json:"date_formats,omitempty"` // DateRounding How to round the date when formatting the date into the index name. Valid // values are: // `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and @@ -50,7 +50,7 @@ type DateIndexNameProcessor struct { // Field The field to get the date or timestamp from. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IndexNameFormat The format to be used when printing the parsed date into the index name. 
@@ -123,16 +123,9 @@ func (s *DateIndexNameProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -224,3 +217,13 @@ func NewDateIndexNameProcessor() *DateIndexNameProcessor { return r } + +// true + +type DateIndexNameProcessorVariant interface { + DateIndexNameProcessorCaster() *DateIndexNameProcessor +} + +func (s *DateIndexNameProcessor) DateIndexNameProcessorCaster() *DateIndexNameProcessor { + return s +} diff --git a/typedapi/types/datenanosproperty.go b/typedapi/types/datenanosproperty.go index 74ef82e474..288f0408b1 100644 --- a/typedapi/types/datenanosproperty.go +++ b/typedapi/types/datenanosproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateNanosProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L78-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L80-L90 type DateNanosProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,12 +47,15 @@ type DateNanosProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue DateTime `json:"null_value,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { @@ -138,301 +143,313 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := 
NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -507,6 +524,11 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NullValue", err) } + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + case "precision_step": var tmp any @@ -542,306 +564,323 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := 
NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + case "store": var tmp any dec.Decode(&tmp) @@ -856,6 
+895,11 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -870,21 +914,24 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { func (s DateNanosProperty) MarshalJSON() ([]byte, error) { type innerDateNanosProperty DateNanosProperty tmp := innerDateNanosProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - PrecisionStep: s.PrecisionStep, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date_nanos" @@ -895,10 +942,20 @@ func (s DateNanosProperty) MarshalJSON() ([]byte, error) { // NewDateNanosProperty returns a DateNanosProperty. 
func NewDateNanosProperty() *DateNanosProperty { r := &DateNanosProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DateNanosPropertyVariant interface { + DateNanosPropertyCaster() *DateNanosProperty +} + +func (s *DateNanosProperty) DateNanosPropertyCaster() *DateNanosProperty { + return s +} diff --git a/typedapi/types/dateprocessor.go b/typedapi/types/dateprocessor.go index e7b59b8c87..3aa57a1ef7 100644 --- a/typedapi/types/dateprocessor.go +++ b/typedapi/types/dateprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DateProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L729-L762 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L770-L803 type DateProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -43,7 +43,7 @@ type DateProcessor struct { // UNIX_MS, or TAI64N. Formats []string `json:"formats"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // Locale The locale to use when parsing the date, relevant when parsing month names or @@ -103,16 +103,9 @@ func (s *DateProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -197,3 +190,13 @@ func NewDateProcessor() *DateProcessor { return r } + +// true + +type DateProcessorVariant interface { + DateProcessorCaster() *DateProcessor +} + +func (s *DateProcessor) DateProcessorCaster() *DateProcessor { + return s +} diff --git a/typedapi/types/dateproperty.go b/typedapi/types/dateproperty.go index 18dcc0c3d9..4d597f1dc8 100644 --- a/typedapi/types/dateproperty.go +++ b/typedapi/types/dateproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L66-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L66-L78 type DateProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -47,12 +49,15 @@ type DateProperty struct { Index *bool `json:"index,omitempty"` Locale *string `json:"locale,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue DateTime `json:"null_value,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateProperty) UnmarshalJSON(data []byte) error { @@ -145,301 +150,313 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -526,6 +543,11 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NullValue", err) } + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + case "precision_step": var tmp any @@ -561,306 +583,323 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + case "store": var tmp any dec.Decode(&tmp) @@ -875,6 +914,11 @@ func (s *DateProperty) 
UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -889,23 +933,26 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { func (s DateProperty) MarshalJSON() ([]byte, error) { type innerDateProperty DateProperty tmp := innerDateProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fielddata: s.Fielddata, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Index: s.Index, - Locale: s.Locale, - Meta: s.Meta, - NullValue: s.NullValue, - PrecisionStep: s.PrecisionStep, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Locale: s.Locale, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date" @@ -916,10 +963,20 @@ func (s DateProperty) MarshalJSON() ([]byte, error) { // NewDateProperty returns a DateProperty. 
func NewDateProperty() *DateProperty { r := &DateProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DatePropertyVariant interface { + DatePropertyCaster() *DateProperty +} + +func (s *DateProperty) DatePropertyCaster() *DateProperty { + return s +} diff --git a/typedapi/types/daterangeaggregate.go b/typedapi/types/daterangeaggregate.go index 128fa27d9d..e5f615a27f 100644 --- a/typedapi/types/daterangeaggregate.go +++ b/typedapi/types/daterangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DateRangeAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L609-L615 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L609-L615 type DateRangeAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewDateRangeAggregate() *DateRangeAggregate { return r } + +// false diff --git a/typedapi/types/daterangeaggregation.go b/typedapi/types/daterangeaggregation.go index 3c74365d19..049197b87b 100644 --- a/typedapi/types/daterangeaggregation.go +++ b/typedapi/types/daterangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DateRangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L281-L307 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L281-L307 type DateRangeAggregation struct { // Field The date field whose values are use to build ranges. Field *string `json:"field,omitempty"` @@ -121,3 +121,13 @@ func NewDateRangeAggregation() *DateRangeAggregation { return r } + +// true + +type DateRangeAggregationVariant interface { + DateRangeAggregationCaster() *DateRangeAggregation +} + +func (s *DateRangeAggregation) DateRangeAggregationCaster() *DateRangeAggregation { + return s +} diff --git a/typedapi/types/daterangeexpression.go b/typedapi/types/daterangeexpression.go index 23ec47e0b3..c15ac3d267 100644 --- a/typedapi/types/daterangeexpression.go +++ b/typedapi/types/daterangeexpression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DateRangeExpression type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L318-L331 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L318-L331 type DateRangeExpression struct { // From Start of the range (inclusive). From FieldDateMath `json:"from,omitempty"` @@ -89,3 +89,13 @@ func NewDateRangeExpression() *DateRangeExpression { return r } + +// true + +type DateRangeExpressionVariant interface { + DateRangeExpressionCaster() *DateRangeExpression +} + +func (s *DateRangeExpression) DateRangeExpressionCaster() *DateRangeExpression { + return s +} diff --git a/typedapi/types/daterangeproperty.go b/typedapi/types/daterangeproperty.go index b5699bdc51..0f6f2590d6 100644 --- a/typedapi/types/daterangeproperty.go +++ b/typedapi/types/daterangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L29-L32 type DateRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -45,10 +46,11 @@ type DateRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +152,313 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -519,301 +533,313 
@@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := 
NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -833,6 +859,11 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +878,20 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { func (s DateRangeProperty) MarshalJSON() ([]byte, error) { type innerDateRangeProperty DateRangeProperty tmp := innerDateRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - 
Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date_range" @@ -870,10 +902,20 @@ func (s DateRangeProperty) MarshalJSON() ([]byte, error) { // NewDateRangeProperty returns a DateRangeProperty. func NewDateRangeProperty() *DateRangeProperty { r := &DateRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DateRangePropertyVariant interface { + DateRangePropertyCaster() *DateRangeProperty +} + +func (s *DateRangeProperty) DateRangePropertyCaster() *DateRangeProperty { + return s +} diff --git a/typedapi/types/daterangequery.go b/typedapi/types/daterangequery.go index 6263575d97..4331343dd7 100644 --- a/typedapi/types/daterangequery.go +++ b/typedapi/types/daterangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DateRangeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L161-L170 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L161-L170 type DateRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -160,3 +160,13 @@ func NewDateRangeQuery() *DateRangeQuery { return r } + +// true + +type DateRangeQueryVariant interface { + DateRangeQueryCaster() *DateRangeQuery +} + +func (s *DateRangeQuery) DateRangeQueryCaster() *DateRangeQuery { + return s +} diff --git a/typedapi/types/datetime.go b/typedapi/types/datetime.go index fde90e6b05..b26c062473 100644 --- a/typedapi/types/datetime.go +++ b/typedapi/types/datetime.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // int64 // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L22-L27 type DateTime any + +type DateTimeVariant interface { + DateTimeCaster() *DateTime +} diff --git a/typedapi/types/decayfunction.go b/typedapi/types/decayfunction.go index 4783be927b..d8a5767656 100644 --- a/typedapi/types/decayfunction.go +++ b/typedapi/types/decayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,5 +27,9 @@ package types // NumericDecayFunction // GeoDecayFunction // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L215-L224 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L215-L224 type DecayFunction any + +type DecayFunctionVariant interface { + DecayFunctionCaster() *DecayFunction +} diff --git a/typedapi/types/decayfunctionbasedatemathduration.go b/typedapi/types/decayfunctionbasedatemathduration.go deleted file mode 100644 index ed57aa5241..0000000000 --- a/typedapi/types/decayfunctionbasedatemathduration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBaseDateMathDuration type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L191-L202 -type DecayFunctionBaseDateMathDuration struct { - DecayFunctionBaseDateMathDuration map[string]DecayPlacementDateMathDuration `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBaseDateMathDuration) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBaseDateMathDuration - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBaseDateMathDuration { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBaseDateMathDuration") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBaseDateMathDuration returns a DecayFunctionBaseDateMathDuration. 
-func NewDecayFunctionBaseDateMathDuration() *DecayFunctionBaseDateMathDuration { - r := &DecayFunctionBaseDateMathDuration{ - DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration, 0), - } - - return r -} diff --git a/typedapi/types/decayfunctionbasedoubledouble.go b/typedapi/types/decayfunctionbasedoubledouble.go deleted file mode 100644 index c071ddc469..0000000000 --- a/typedapi/types/decayfunctionbasedoubledouble.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBasedoubledouble type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L191-L202 -type DecayFunctionBasedoubledouble struct { - DecayFunctionBasedoubledouble map[string]DecayPlacementdoubledouble `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBasedoubledouble) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBasedoubledouble - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBasedoubledouble { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBasedoubledouble") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBasedoubledouble returns a DecayFunctionBasedoubledouble. -func NewDecayFunctionBasedoubledouble() *DecayFunctionBasedoubledouble { - r := &DecayFunctionBasedoubledouble{ - DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble, 0), - } - - return r -} diff --git a/typedapi/types/decayfunctionbasegeolocationdistance.go b/typedapi/types/decayfunctionbasegeolocationdistance.go deleted file mode 100644 index ee8f1da8f5..0000000000 --- a/typedapi/types/decayfunctionbasegeolocationdistance.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBaseGeoLocationDistance type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L191-L202 -type DecayFunctionBaseGeoLocationDistance struct { - DecayFunctionBaseGeoLocationDistance map[string]DecayPlacementGeoLocationDistance `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. 
- MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBaseGeoLocationDistance) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBaseGeoLocationDistance - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBaseGeoLocationDistance { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBaseGeoLocationDistance") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBaseGeoLocationDistance returns a DecayFunctionBaseGeoLocationDistance. -func NewDecayFunctionBaseGeoLocationDistance() *DecayFunctionBaseGeoLocationDistance { - r := &DecayFunctionBaseGeoLocationDistance{ - DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance, 0), - } - - return r -} diff --git a/typedapi/types/decayplacement.go b/typedapi/types/decayplacement.go index 73cc71e0f4..12e934afbd 100644 --- a/typedapi/types/decayplacement.go +++ b/typedapi/types/decayplacement.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DecayPlacement type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L170-L189 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacement struct { // Decay Defines how documents are scored at the distance given at scale. Decay *Float64 `json:"decay,omitempty"` @@ -103,3 +103,13 @@ func NewDecayPlacement() *DecayPlacement { return r } + +// true + +type DecayPlacementVariant interface { + DecayPlacementCaster() *DecayPlacement +} + +func (s *DecayPlacement) DecayPlacementCaster() *DecayPlacement { + return s +} diff --git a/typedapi/types/decayplacementdatemathduration.go b/typedapi/types/decayplacementdatemathduration.go index 105bef13f5..f7b7b24ec6 100644 --- a/typedapi/types/decayplacementdatemathduration.go +++ b/typedapi/types/decayplacementdatemathduration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DecayPlacementDateMathDuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L170-L189 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementDateMathDuration struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -103,3 +103,13 @@ func NewDecayPlacementDateMathDuration() *DecayPlacementDateMathDuration { return r } + +// true + +type DecayPlacementDateMathDurationVariant interface { + DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration +} + +func (s *DecayPlacementDateMathDuration) DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration { + return s +} diff --git a/typedapi/types/decayplacementdoubledouble.go b/typedapi/types/decayplacementdoubledouble.go index ecf4418c7a..103d58c6f6 100644 --- a/typedapi/types/decayplacementdoubledouble.go +++ b/typedapi/types/decayplacementdoubledouble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DecayPlacementdoubledouble type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L170-L189 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementdoubledouble struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -136,3 +136,13 @@ func NewDecayPlacementdoubledouble() *DecayPlacementdoubledouble { return r } + +// true + +type DecayPlacementdoubledoubleVariant interface { + DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble +} + +func (s *DecayPlacementdoubledouble) DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble { + return s +} diff --git a/typedapi/types/decayplacementgeolocationdistance.go b/typedapi/types/decayplacementgeolocationdistance.go index b957c7b06c..8963dc6ba5 100644 --- a/typedapi/types/decayplacementgeolocationdistance.go +++ b/typedapi/types/decayplacementgeolocationdistance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DecayPlacementGeoLocationDistance type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L170-L189 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementGeoLocationDistance struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -143,3 +143,13 @@ func NewDecayPlacementGeoLocationDistance() *DecayPlacementGeoLocationDistance { return r } + +// true + +type DecayPlacementGeoLocationDistanceVariant interface { + DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance +} + +func (s *DecayPlacementGeoLocationDistance) DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance { + return s +} diff --git a/typedapi/types/defaults.go b/typedapi/types/defaults.go index a07a616852..626d32ec18 100644 --- a/typedapi/types/defaults.go +++ b/typedapi/types/defaults.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Defaults type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/types.ts#L24-L27 type Defaults struct { AnomalyDetectors AnomalyDetectors `json:"anomaly_detectors"` Datafeeds Datafeeds `json:"datafeeds"` @@ -34,3 +34,5 @@ func NewDefaults() *Defaults { return r } + +// false diff --git a/typedapi/types/definition.go b/typedapi/types/definition.go index f9ecc63198..e982b78fff 100644 --- a/typedapi/types/definition.go +++ b/typedapi/types/definition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Definition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L24-L29 type Definition struct { // Preprocessors Collection of preprocessors Preprocessors []Preprocessor `json:"preprocessors,omitempty"` @@ -36,3 +36,13 @@ func NewDefinition() *Definition { return r } + +// true + +type DefinitionVariant interface { + DefinitionCaster() *Definition +} + +func (s *Definition) DefinitionCaster() *Definition { + return s +} diff --git a/typedapi/types/delayeddatacheckconfig.go b/typedapi/types/delayeddatacheckconfig.go index 0093f0a0dc..12227e8755 100644 --- a/typedapi/types/delayeddatacheckconfig.go +++ b/typedapi/types/delayeddatacheckconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DelayedDataCheckConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L121-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L122-L133 type DelayedDataCheckConfig struct { // CheckWindow The window of time that is searched for late data. This window of time ends // with the latest finalized bucket. 
@@ -89,3 +89,13 @@ func NewDelayedDataCheckConfig() *DelayedDataCheckConfig { return r } + +// true + +type DelayedDataCheckConfigVariant interface { + DelayedDataCheckConfigCaster() *DelayedDataCheckConfig +} + +func (s *DelayedDataCheckConfig) DelayedDataCheckConfigCaster() *DelayedDataCheckConfig { + return s +} diff --git a/typedapi/types/deleteaction.go b/typedapi/types/deleteaction.go index 19920bdfee..58c798d853 100644 --- a/typedapi/types/deleteaction.go +++ b/typedapi/types/deleteaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DeleteAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L152-L154 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L149-L151 type DeleteAction struct { DeleteSearchableSnapshot *bool `json:"delete_searchable_snapshot,omitempty"` } @@ -76,3 +76,13 @@ func NewDeleteAction() *DeleteAction { return r } + +// true + +type DeleteActionVariant interface { + DeleteActionCaster() *DeleteAction +} + +func (s *DeleteAction) DeleteActionCaster() *DeleteAction { + return s +} diff --git a/typedapi/types/deleteoperation.go b/typedapi/types/deleteoperation.go index c4facdbd24..0e591b3528 100644 --- a/typedapi/types/deleteoperation.go +++ b/typedapi/types/deleteoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,15 +33,15 @@ import ( // DeleteOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L134-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L144-L144 type DeleteOperation struct { // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -118,3 +118,13 @@ func NewDeleteOperation() *DeleteOperation { return r } + +// true + +type DeleteOperationVariant interface { + DeleteOperationCaster() *DeleteOperation +} + +func (s *DeleteOperation) DeleteOperationCaster() *DeleteOperation { + return s +} diff --git a/typedapi/types/delimitedpayloadtokenfilter.go b/typedapi/types/delimitedpayloadtokenfilter.go index 1d8995ca72..0d75cfd473 100644 --- a/typedapi/types/delimitedpayloadtokenfilter.go +++ b/typedapi/types/delimitedpayloadtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DelimitedPayloadTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L67-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L67-L71 type DelimitedPayloadTokenFilter struct { Delimiter *string `json:"delimiter,omitempty"` Encoding *delimitedpayloadencoding.DelimitedPayloadEncoding `json:"encoding,omitempty"` @@ -109,3 +109,13 @@ func NewDelimitedPayloadTokenFilter() *DelimitedPayloadTokenFilter { return r } + +// true + +type DelimitedPayloadTokenFilterVariant interface { + DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter +} + +func (s *DelimitedPayloadTokenFilter) DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter { + return s +} diff --git a/typedapi/types/densevectorindexoptions.go b/typedapi/types/densevectorindexoptions.go index e5aae82025..e849996f83 100644 --- a/typedapi/types/densevectorindexoptions.go +++ b/typedapi/types/densevectorindexoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DenseVectorIndexOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/DenseVectorProperty.ts#L129-L162 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/DenseVectorProperty.ts#L129-L162 type DenseVectorIndexOptions struct { // ConfidenceInterval The confidence interval to use when quantizing the vectors. Can be any value // between and including `0.90` and @@ -145,3 +145,13 @@ func NewDenseVectorIndexOptions() *DenseVectorIndexOptions { return r } + +// true + +type DenseVectorIndexOptionsVariant interface { + DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions +} + +func (s *DenseVectorIndexOptions) DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions { + return s +} diff --git a/typedapi/types/densevectorproperty.go b/typedapi/types/densevectorproperty.go index 01b03bf68f..f688b9ade6 100644 --- a/typedapi/types/densevectorproperty.go +++ b/typedapi/types/densevectorproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorelementtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorsimilarity" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DenseVectorProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/DenseVectorProperty.ts#L23-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/DenseVectorProperty.ts#L23-L62 type DenseVectorProperty struct { // Dims Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, // it will be set to the length of @@ -74,8 +75,9 @@ type DenseVectorProperty struct { // `bit` vectors only support `l2_norm` as their similarity metric. // // This parameter can only be specified when `index` is `true`. - Similarity *densevectorsimilarity.DenseVectorSimilarity `json:"similarity,omitempty"` - Type string `json:"type,omitempty"` + Similarity *densevectorsimilarity.DenseVectorSimilarity `json:"similarity,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { @@ -138,301 +140,313 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] 
= oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -500,301 +514,313 @@ func (s *DenseVectorProperty) UnmarshalJSON(data 
[]byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := 
NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -805,6 +831,11 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Similarity", err) } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -819,17 +850,18 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { type innerDenseVectorProperty DenseVectorProperty tmp := innerDenseVectorProperty{ - Dims: s.Dims, - Dynamic: s.Dynamic, - ElementType: s.ElementType, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - IndexOptions: s.IndexOptions, - Meta: s.Meta, - Properties: s.Properties, - Similarity: 
s.Similarity, - Type: s.Type, + Dims: s.Dims, + Dynamic: s.Dynamic, + ElementType: s.ElementType, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "dense_vector" @@ -840,10 +872,20 @@ func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { // NewDenseVectorProperty returns a DenseVectorProperty. func NewDenseVectorProperty() *DenseVectorProperty { r := &DenseVectorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DenseVectorPropertyVariant interface { + DenseVectorPropertyCaster() *DenseVectorProperty +} + +func (s *DenseVectorProperty) DenseVectorPropertyCaster() *DenseVectorProperty { + return s +} diff --git a/typedapi/types/dependency.go b/typedapi/types/dependency.go index 335cf8dc3e..add859591e 100644 --- a/typedapi/types/dependency.go +++ b/typedapi/types/dependency.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Dependency type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L30-L33 type Dependency struct { Field string `json:"field"` Value ScalarValue `json:"value"` @@ -80,3 +80,13 @@ func NewDependency() *Dependency { return r } + +// true + +type DependencyVariant interface { + DependencyCaster() *Dependency +} + +func (s *Dependency) DependencyCaster() *Dependency { + return s +} diff --git a/typedapi/types/deprecation.go b/typedapi/types/deprecation.go index 53c4532173..c712386ee7 100644 --- a/typedapi/types/deprecation.go +++ b/typedapi/types/deprecation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,13 +33,19 @@ import ( // Deprecation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/deprecations/types.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/deprecations/types.ts#L32-L47 type Deprecation struct { - Details string `json:"details"` + // Details Optional details about the deprecation warning. + Details *string `json:"details,omitempty"` // Level The level property describes the significance of the issue. - Level deprecationlevel.DeprecationLevel `json:"level"` - Message string `json:"message"` - Url string `json:"url"` + Level deprecationlevel.DeprecationLevel `json:"level"` + // Message Descriptive information about the deprecation warning. 
+ Message string `json:"message"` + Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + ResolveDuringRollingUpgrade bool `json:"resolve_during_rolling_upgrade"` + // Url A link to the breaking change documentation, where you can find more + // information about this change. + Url string `json:"url"` } func (s *Deprecation) UnmarshalJSON(data []byte) error { @@ -67,7 +73,7 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Details = o + s.Details = &o case "level": if err := dec.Decode(&s.Level); err != nil { @@ -86,6 +92,28 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { } s.Message = o + case "_meta": + if s.Meta_ == nil { + s.Meta_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "resolve_during_rolling_upgrade": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ResolveDuringRollingUpgrade", err) + } + s.ResolveDuringRollingUpgrade = value + case bool: + s.ResolveDuringRollingUpgrade = v + } + case "url": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -105,7 +133,11 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { // NewDeprecation returns a Deprecation. func NewDeprecation() *Deprecation { - r := &Deprecation{} + r := &Deprecation{ + Meta_: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/deprecationindexing.go b/typedapi/types/deprecationindexing.go index 1217a38546..1cecca04e3 100644 --- a/typedapi/types/deprecationindexing.go +++ b/typedapi/types/deprecationindexing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DeprecationIndexing type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L144-L146 type DeprecationIndexing struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewDeprecationIndexing() *DeprecationIndexing { return r } + +// false diff --git a/typedapi/types/derivativeaggregate.go b/typedapi/types/derivativeaggregate.go index 5137d02845..4f5cee0815 100644 --- a/typedapi/types/derivativeaggregate.go +++ b/typedapi/types/derivativeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DerivativeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L241-L248 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L241-L248 type DerivativeAggregate struct { Meta Metadata `json:"meta,omitempty"` NormalizedValue *Float64 `json:"normalized_value,omitempty"` @@ -119,3 +119,5 @@ func NewDerivativeAggregate() *DerivativeAggregate { return r } + +// false diff --git a/typedapi/types/derivativeaggregation.go b/typedapi/types/derivativeaggregation.go index 156b435c88..ace11a3bd3 100644 --- a/typedapi/types/derivativeaggregation.go +++ b/typedapi/types/derivativeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DerivativeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L216-L216 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L216-L216 type DerivativeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewDerivativeAggregation() *DerivativeAggregation { return r } + +// true + +type DerivativeAggregationVariant interface { + DerivativeAggregationCaster() *DerivativeAggregation +} + +func (s *DerivativeAggregation) DerivativeAggregationCaster() *DerivativeAggregation { + return s +} diff --git a/typedapi/types/detailsinfo.go b/typedapi/types/detailsinfo.go new file mode 100644 index 0000000000..e17df40cae --- /dev/null +++ b/typedapi/types/detailsinfo.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DetailsInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L286-L321 +type DetailsInfo struct { + // Blob A description of the blob that was written and read. 
+ Blob BlobDetails `json:"blob"` + // OverwriteElapsed The elapsed time spent overwriting the blob. + // If the blob was not overwritten, this information is omitted. + OverwriteElapsed Duration `json:"overwrite_elapsed,omitempty"` + // OverwriteElapsedNanos The elapsed time spent overwriting the blob, in nanoseconds. + // If the blob was not overwritten, this information is omitted. + OverwriteElapsedNanos *int64 `json:"overwrite_elapsed_nanos,omitempty"` + // WriteElapsed The elapsed time spent writing the blob. + WriteElapsed Duration `json:"write_elapsed"` + // WriteElapsedNanos The elapsed time spent writing the blob, in nanoseconds. + WriteElapsedNanos int64 `json:"write_elapsed_nanos"` + // WriteThrottled The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob. + WriteThrottled Duration `json:"write_throttled"` + // WriteThrottledNanos The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob, in nanoseconds. + WriteThrottledNanos int64 `json:"write_throttled_nanos"` + // WriterNode The node which wrote the blob and coordinated the read operations. 
+ WriterNode SnapshotNodeInfo `json:"writer_node"` +} + +func (s *DetailsInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "blob": + if err := dec.Decode(&s.Blob); err != nil { + return fmt.Errorf("%s | %w", "Blob", err) + } + + case "overwrite_elapsed": + if err := dec.Decode(&s.OverwriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsed", err) + } + + case "overwrite_elapsed_nanos": + if err := dec.Decode(&s.OverwriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsedNanos", err) + } + + case "write_elapsed": + if err := dec.Decode(&s.WriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsed", err) + } + + case "write_elapsed_nanos": + if err := dec.Decode(&s.WriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsedNanos", err) + } + + case "write_throttled": + if err := dec.Decode(&s.WriteThrottled); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottled", err) + } + + case "write_throttled_nanos": + if err := dec.Decode(&s.WriteThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottledNanos", err) + } + + case "writer_node": + if err := dec.Decode(&s.WriterNode); err != nil { + return fmt.Errorf("%s | %w", "WriterNode", err) + } + + } + } + return nil +} + +// NewDetailsInfo returns a DetailsInfo. +func NewDetailsInfo() *DetailsInfo { + r := &DetailsInfo{} + + return r +} + +// false diff --git a/typedapi/types/detectionrule.go b/typedapi/types/detectionrule.go index f660db256a..55dead9c4f 100644 --- a/typedapi/types/detectionrule.go +++ b/typedapi/types/detectionrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // DetectionRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Rule.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Rule.ts#L25-L39 type DetectionRule struct { // Actions The set of actions to be triggered when the rule applies. If more than one // action is specified the effects of all actions are combined. @@ -45,8 +45,18 @@ type DetectionRule struct { // NewDetectionRule returns a DetectionRule. func NewDetectionRule() *DetectionRule { r := &DetectionRule{ - Scope: make(map[string]FilterRef, 0), + Scope: make(map[string]FilterRef), } return r } + +// true + +type DetectionRuleVariant interface { + DetectionRuleCaster() *DetectionRule +} + +func (s *DetectionRule) DetectionRuleCaster() *DetectionRule { + return s +} diff --git a/typedapi/types/detector.go b/typedapi/types/detector.go index dfc0a98c66..44f4778532 100644 --- a/typedapi/types/detector.go +++ b/typedapi/types/detector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Detector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Detector.ts#L25-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Detector.ts#L25-L67 type Detector struct { // ByFieldName The field used to split the data. In particular, this property is used for // analyzing the splits with respect to their own history. It is used for @@ -183,3 +183,13 @@ func NewDetector() *Detector { return r } + +// true + +type DetectorVariant interface { + DetectorCaster() *Detector +} + +func (s *Detector) DetectorCaster() *Detector { + return s +} diff --git a/typedapi/types/detectorread.go b/typedapi/types/detectorread.go index 4621329bc6..50c91052bb 100644 --- a/typedapi/types/detectorread.go +++ b/typedapi/types/detectorread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DetectorRead type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Detector.ts#L69-L125 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Detector.ts#L69-L125 type DetectorRead struct { // ByFieldName The field used to split the data. 
// In particular, this property is used for analyzing the splits with respect to @@ -190,3 +190,5 @@ func NewDetectorRead() *DetectorRead { return r } + +// false diff --git a/typedapi/types/detectorupdate.go b/typedapi/types/detectorupdate.go new file mode 100644 index 0000000000..855716e476 --- /dev/null +++ b/typedapi/types/detectorupdate.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DetectorUpdate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Detector.ts#L127-L143 +type DetectorUpdate struct { + // CustomRules An array of custom rule objects, which enable you to customize the way + // detectors operate. + // For example, a rule may dictate to the detector conditions under which + // results should be skipped. + // Kibana refers to custom rules as job rules. 
+ CustomRules []DetectionRule `json:"custom_rules,omitempty"` + // Description A description of the detector. + Description *string `json:"description,omitempty"` + // DetectorIndex A unique identifier for the detector. + // This identifier is based on the order of the detectors in the + // `analysis_config`, starting at zero. + DetectorIndex int `json:"detector_index"` +} + +func (s *DetectorUpdate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return fmt.Errorf("%s | %w", "CustomRules", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = value + case float64: + f := int(v) + s.DetectorIndex = f + } + + } + } + return nil +} + +// NewDetectorUpdate returns a DetectorUpdate. +func NewDetectorUpdate() *DetectorUpdate { + r := &DetectorUpdate{} + + return r +} + +// true + +type DetectorUpdateVariant interface { + DetectorUpdateCaster() *DetectorUpdate +} + +func (s *DetectorUpdate) DetectorUpdateCaster() *DetectorUpdate { + return s +} diff --git a/typedapi/types/dfsknnprofile.go b/typedapi/types/dfsknnprofile.go index 86b1b1b776..9aab28b445 100644 --- a/typedapi/types/dfsknnprofile.go +++ b/typedapi/types/dfsknnprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DfsKnnProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L181-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L181-L186 type DfsKnnProfile struct { Collector []KnnCollectorResult `json:"collector"` Query []KnnQueryProfileResult `json:"query"` @@ -105,3 +105,5 @@ func NewDfsKnnProfile() *DfsKnnProfile { return r } + +// false diff --git a/typedapi/types/dfsprofile.go b/typedapi/types/dfsprofile.go index 91ec2b80bd..7e0bced998 100644 --- a/typedapi/types/dfsprofile.go +++ b/typedapi/types/dfsprofile.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DfsProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L154-L157 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L154-L157 type DfsProfile struct { Knn []DfsKnnProfile `json:"knn,omitempty"` Statistics *DfsStatisticsProfile `json:"statistics,omitempty"` @@ -34,3 +34,5 @@ func NewDfsProfile() *DfsProfile { return r } + +// false diff --git a/typedapi/types/dfsstatisticsbreakdown.go b/typedapi/types/dfsstatisticsbreakdown.go index e142fdf533..b3d204fed0 100644 --- a/typedapi/types/dfsstatisticsbreakdown.go +++ b/typedapi/types/dfsstatisticsbreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DfsStatisticsBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L170-L179 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L170-L179 type DfsStatisticsBreakdown struct { CollectionStatistics int64 `json:"collection_statistics"` CollectionStatisticsCount int64 `json:"collection_statistics_count"` @@ -189,3 +189,5 @@ func NewDfsStatisticsBreakdown() *DfsStatisticsBreakdown { return r } + +// false diff --git a/typedapi/types/dfsstatisticsprofile.go b/typedapi/types/dfsstatisticsprofile.go index d7a8964cee..2a22c69ed0 100644 --- a/typedapi/types/dfsstatisticsprofile.go +++ b/typedapi/types/dfsstatisticsprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DfsStatisticsProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L159-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L159-L167 type DfsStatisticsProfile struct { Breakdown DfsStatisticsBreakdown `json:"breakdown"` Children []DfsStatisticsProfile `json:"children,omitempty"` @@ -117,8 +117,10 @@ func (s *DfsStatisticsProfile) UnmarshalJSON(data []byte) error { // NewDfsStatisticsProfile returns a DfsStatisticsProfile. 
func NewDfsStatisticsProfile() *DfsStatisticsProfile { r := &DfsStatisticsProfile{ - Debug: make(map[string]json.RawMessage, 0), + Debug: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/diagnosis.go b/typedapi/types/diagnosis.go index 2dd3bc4f64..c27404405f 100644 --- a/typedapi/types/diagnosis.go +++ b/typedapi/types/diagnosis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Diagnosis type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L50-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L51-L57 type Diagnosis struct { Action string `json:"action"` AffectedResources DiagnosisAffectedResources `json:"affected_resources"` @@ -119,3 +119,5 @@ func NewDiagnosis() *Diagnosis { return r } + +// false diff --git a/typedapi/types/diagnosisaffectedresources.go b/typedapi/types/diagnosisaffectedresources.go index a5501fabca..ae414504e7 100644 --- a/typedapi/types/diagnosisaffectedresources.go +++ b/typedapi/types/diagnosisaffectedresources.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DiagnosisAffectedResources type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L59-L65 type DiagnosisAffectedResources struct { FeatureStates []string `json:"feature_states,omitempty"` Indices []string `json:"indices,omitempty"` @@ -101,3 +101,5 @@ func NewDiagnosisAffectedResources() *DiagnosisAffectedResources { return r } + +// false diff --git a/typedapi/types/dictionarydecompoundertokenfilter.go b/typedapi/types/dictionarydecompoundertokenfilter.go index 28bcd091ea..14b38d3c25 100644 --- a/typedapi/types/dictionarydecompoundertokenfilter.go +++ b/typedapi/types/dictionarydecompoundertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DictionaryDecompounderTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L53-L55 type DictionaryDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -191,3 +191,13 @@ func NewDictionaryDecompounderTokenFilter() *DictionaryDecompounderTokenFilter { return r } + +// true + +type DictionaryDecompounderTokenFilterVariant interface { + DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter +} + +func (s *DictionaryDecompounderTokenFilter) DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter { + return s +} diff --git a/typedapi/types/directgenerator.go b/typedapi/types/directgenerator.go index 3fb597b385..d2f7e1e81c 100644 --- a/typedapi/types/directgenerator.go +++ b/typedapi/types/directgenerator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DirectGenerator type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L268-L331 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L268-L331 type DirectGenerator struct { // Field The field to fetch the candidate suggestions from. // Needs to be set globally or per suggestion. 
@@ -252,3 +252,13 @@ func NewDirectGenerator() *DirectGenerator { return r } + +// true + +type DirectGeneratorVariant interface { + DirectGeneratorCaster() *DirectGenerator +} + +func (s *DirectGenerator) DirectGeneratorCaster() *DirectGenerator { + return s +} diff --git a/typedapi/types/discovery.go b/typedapi/types/discovery.go index caca2c335d..c4d00f0644 100644 --- a/typedapi/types/discovery.go +++ b/typedapi/types/discovery.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Discovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L201-L219 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L201-L219 type Discovery struct { ClusterApplierStats *ClusterAppliedStats `json:"cluster_applier_stats,omitempty"` // ClusterStateQueue Contains statistics for the cluster state queue of the node. @@ -43,8 +43,10 @@ type Discovery struct { // NewDiscovery returns a Discovery. func NewDiscovery() *Discovery { r := &Discovery{ - ClusterStateUpdate: make(map[string]ClusterStateUpdate, 0), + ClusterStateUpdate: make(map[string]ClusterStateUpdate), } return r } + +// false diff --git a/typedapi/types/discoverynode.go b/typedapi/types/discoverynode.go index ccaa53254f..3aca389835 100644 --- a/typedapi/types/discoverynode.go +++ b/typedapi/types/discoverynode.go @@ -16,82 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// DiscoveryNode type. +// DiscoveryNode type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DiscoveryNode.ts#L24-L30 -type DiscoveryNode struct { - Attributes map[string]string `json:"attributes"` - EphemeralId string `json:"ephemeral_id"` - Id string `json:"id"` - Name string `json:"name"` - TransportAddress string `json:"transport_address"` -} - -func (s *DiscoveryNode) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "attributes": - if s.Attributes == nil { - s.Attributes = make(map[string]string, 0) - } - if err := dec.Decode(&s.Attributes); err != nil { - return fmt.Errorf("%s | %w", "Attributes", err) - } - - case "ephemeral_id": - if err := dec.Decode(&s.EphemeralId); err != nil { - return fmt.Errorf("%s | %w", "EphemeralId", err) - } - - case "id": - if err := dec.Decode(&s.Id); err != nil { - return fmt.Errorf("%s | %w", "Id", err) - } - - case "name": - if err := dec.Decode(&s.Name); err != nil { - return fmt.Errorf("%s | %w", "Name", err) - } - - case "transport_address": - if err := dec.Decode(&s.TransportAddress); err != nil { - return fmt.Errorf("%s | %w", "TransportAddress", err) - } - - } - } - return nil -} - -// NewDiscoveryNode returns a DiscoveryNode. 
-func NewDiscoveryNode() *DiscoveryNode { - r := &DiscoveryNode{ - Attributes: make(map[string]string, 0), - } - - return r -} +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DiscoveryNode.ts#L25-L25 +type DiscoveryNode map[string]DiscoveryNodeContent diff --git a/typedapi/types/basenode.go b/typedapi/types/discoverynodecompact.go similarity index 57% rename from typedapi/types/basenode.go rename to typedapi/types/discoverynodecompact.go index 779e4a535d..bb8ce50f9f 100644 --- a/typedapi/types/basenode.go +++ b/typedapi/types/discoverynodecompact.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,23 +26,20 @@ import ( "errors" "fmt" "io" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) -// BaseNode type. +// DiscoveryNodeCompact type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/BaseNode.ts#L25-L32 -type BaseNode struct { - Attributes map[string]string `json:"attributes"` - Host string `json:"host"` - Ip string `json:"ip"` - Name string `json:"name"` - Roles []noderole.NodeRole `json:"roles,omitempty"` - TransportAddress string `json:"transport_address"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DiscoveryNode.ts#L39-L48 +type DiscoveryNodeCompact struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + Id string `json:"id"` + Name string `json:"name"` + TransportAddress string `json:"transport_address"` } -func (s *BaseNode) UnmarshalJSON(data []byte) error { +func (s *DiscoveryNodeCompact) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -65,14 +62,14 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Attributes", err) } - case "host": - if err := dec.Decode(&s.Host); err != nil { - return fmt.Errorf("%s | %w", "Host", err) + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) } - case "ip": - if err := dec.Decode(&s.Ip); err != nil { - return fmt.Errorf("%s | %w", "Ip", err) + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) } case "name": @@ -80,11 +77,6 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Name", err) } - case "roles": - if err := dec.Decode(&s.Roles); err != nil { - return fmt.Errorf("%s | %w", "Roles", err) - } - case "transport_address": if err := dec.Decode(&s.TransportAddress); err != nil { return fmt.Errorf("%s | %w", "TransportAddress", err) @@ -95,11 +87,13 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return nil } 
-// NewBaseNode returns a BaseNode. -func NewBaseNode() *BaseNode { - r := &BaseNode{ - Attributes: make(map[string]string, 0), +// NewDiscoveryNodeCompact returns a DiscoveryNodeCompact. +func NewDiscoveryNodeCompact() *DiscoveryNodeCompact { + r := &DiscoveryNodeCompact{ + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/discoverynodecontent.go b/typedapi/types/discoverynodecontent.go new file mode 100644 index 0000000000..33c8de77c9 --- /dev/null +++ b/typedapi/types/discoverynodecontent.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DiscoveryNodeContent type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DiscoveryNode.ts#L27-L37 +type DiscoveryNodeContent struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + ExternalId string `json:"external_id"` + MaxIndexVersion int `json:"max_index_version"` + MinIndexVersion int `json:"min_index_version"` + Name *string `json:"name,omitempty"` + Roles []string `json:"roles"` + TransportAddress string `json:"transport_address"` + Version string `json:"version"` +} + +func (s *DiscoveryNodeContent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = o + + case "max_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIndexVersion", err) + } + s.MaxIndexVersion = value + case float64: + f := int(v) + s.MaxIndexVersion = f + } + + case "min_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinIndexVersion", err) + } + s.MinIndexVersion = value + case float64: + f := 
int(v) + s.MinIndexVersion = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewDiscoveryNodeContent returns a DiscoveryNodeContent. +func NewDiscoveryNodeContent() *DiscoveryNodeContent { + r := &DiscoveryNodeContent{ + Attributes: make(map[string]string), + } + + return r +} + +// false diff --git a/typedapi/types/diskindicator.go b/typedapi/types/diskindicator.go index 5540b67cbf..db9a0a95b9 100644 --- a/typedapi/types/diskindicator.go +++ b/typedapi/types/diskindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DiskIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L123-L127 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L124-L128 type DiskIndicator struct { Details *DiskIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewDiskIndicator() *DiskIndicator { return r } + +// false diff --git a/typedapi/types/diskindicatordetails.go b/typedapi/types/diskindicatordetails.go index d74d4aa8a8..00817050df 100644 --- a/typedapi/types/diskindicatordetails.go +++ b/typedapi/types/diskindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DiskIndicatorDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L128-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L129-L135 type DiskIndicatorDetails struct { IndicesWithReadonlyBlock int64 `json:"indices_with_readonly_block"` NodesOverFloodStageWatermark int64 `json:"nodes_over_flood_stage_watermark"` @@ -141,3 +141,5 @@ func NewDiskIndicatorDetails() *DiskIndicatorDetails { return r } + +// false diff --git a/typedapi/types/diskusage.go b/typedapi/types/diskusage.go index b30b3d9956..0e961ff4f1 100644 --- a/typedapi/types/diskusage.go +++ b/typedapi/types/diskusage.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L63-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L63-L70 type DiskUsage struct { FreeBytes int64 `json:"free_bytes"` FreeDiskPercent Float64 `json:"free_disk_percent"` @@ -156,3 +156,5 @@ func NewDiskUsage() *DiskUsage { return r } + +// false diff --git a/typedapi/types/dismaxquery.go b/typedapi/types/dismaxquery.go index d9310ba4f8..581443db1c 100644 --- a/typedapi/types/dismaxquery.go +++ b/typedapi/types/dismaxquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DisMaxQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L88-L103 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L88-L103 type DisMaxQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -125,3 +125,13 @@ func NewDisMaxQuery() *DisMaxQuery { return r } + +// true + +type DisMaxQueryVariant interface { + DisMaxQueryCaster() *DisMaxQuery +} + +func (s *DisMaxQuery) DisMaxQueryCaster() *DisMaxQuery { + return s +} diff --git a/typedapi/types/dissectprocessor.go b/typedapi/types/dissectprocessor.go index 07460ce0d0..74b2416358 100644 --- a/typedapi/types/dissectprocessor.go +++ b/typedapi/types/dissectprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DissectProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L764-L783 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L805-L824 type DissectProcessor struct { // AppendSeparator The character(s) that separate the appended fields. AppendSeparator *string `json:"append_separator,omitempty"` @@ -41,7 +41,7 @@ type DissectProcessor struct { // Field The field to dissect. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -101,16 +101,9 @@ func (s *DissectProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -180,3 +173,13 @@ func NewDissectProcessor() *DissectProcessor { return r } + +// true + +type DissectProcessorVariant interface { + DissectProcessorCaster() *DissectProcessor +} + +func (s *DissectProcessor) DissectProcessorCaster() *DissectProcessor { + return s +} diff --git a/typedapi/types/distancefeaturequery.go b/typedapi/types/distancefeaturequery.go index 4cefbbcf28..f06f816f0f 100644 --- a/typedapi/types/distancefeaturequery.go +++ b/typedapi/types/distancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ package types // GeoDistanceFeatureQuery // DateDistanceFeatureQuery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L76-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L76-L85 type DistanceFeatureQuery any + +type DistanceFeatureQueryVariant interface { + DistanceFeatureQueryCaster() *DistanceFeatureQuery +} diff --git a/typedapi/types/distancefeaturequerybasedatemathduration.go b/typedapi/types/distancefeaturequerybasedatemathduration.go deleted file mode 100644 index a47402cdd8..0000000000 --- a/typedapi/types/distancefeaturequerybasedatemathduration.go +++ /dev/null @@ -1,133 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DistanceFeatureQueryBaseDateMathDuration type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L39-L59 -type DistanceFeatureQueryBaseDateMathDuration struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - // Field Name of the field used to calculate distances. This field must meet the - // following criteria: - // be a `date`, `date_nanos` or `geo_point` field; - // have an `index` mapping parameter value of `true`, which is the default; - // have an `doc_values` mapping parameter value of `true`, which is the default. - Field string `json:"field"` - // Origin Date or point of origin used to calculate distances. - // If the `field` value is a `date` or `date_nanos` field, the `origin` value - // must be a date. - // Date Math, such as `now-1h`, is supported. - // If the field value is a `geo_point` field, the `origin` value must be a - // geopoint. - Origin string `json:"origin"` - // Pivot Distance from the `origin` at which relevance scores receive half of the - // `boost` value. - // If the `field` value is a `date` or `date_nanos` field, the `pivot` value - // must be a time unit, such as `1h` or `10d`. If the `field` value is a - // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` - // or `12m`. 
- Pivot Duration `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` -} - -func (s *DistanceFeatureQueryBaseDateMathDuration) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "origin": - if err := dec.Decode(&s.Origin); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - - case "pivot": - if err := dec.Decode(&s.Pivot); err != nil { - return fmt.Errorf("%s | %w", "Pivot", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - } - } - return nil -} - -// NewDistanceFeatureQueryBaseDateMathDuration returns a DistanceFeatureQueryBaseDateMathDuration. -func NewDistanceFeatureQueryBaseDateMathDuration() *DistanceFeatureQueryBaseDateMathDuration { - r := &DistanceFeatureQueryBaseDateMathDuration{} - - return r -} diff --git a/typedapi/types/distancefeaturequerybasegeolocationdistance.go b/typedapi/types/distancefeaturequerybasegeolocationdistance.go deleted file mode 100644 index 4417b06149..0000000000 --- a/typedapi/types/distancefeaturequerybasegeolocationdistance.go +++ /dev/null @@ -1,173 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DistanceFeatureQueryBaseGeoLocationDistance type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L39-L59 -type DistanceFeatureQueryBaseGeoLocationDistance struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - // Field Name of the field used to calculate distances. This field must meet the - // following criteria: - // be a `date`, `date_nanos` or `geo_point` field; - // have an `index` mapping parameter value of `true`, which is the default; - // have an `doc_values` mapping parameter value of `true`, which is the default. 
- Field string `json:"field"` - // Origin Date or point of origin used to calculate distances. - // If the `field` value is a `date` or `date_nanos` field, the `origin` value - // must be a date. - // Date Math, such as `now-1h`, is supported. - // If the field value is a `geo_point` field, the `origin` value must be a - // geopoint. - Origin GeoLocation `json:"origin"` - // Pivot Distance from the `origin` at which relevance scores receive half of the - // `boost` value. - // If the `field` value is a `date` or `date_nanos` field, the `pivot` value - // must be a time unit, such as `1h` or `10d`. If the `field` value is a - // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` - // or `12m`. - Pivot string `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` -} - -func (s *DistanceFeatureQueryBaseGeoLocationDistance) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "origin": - message := json.RawMessage{} - if err := dec.Decode(&message); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - keyDec := json.NewDecoder(bytes.NewReader(message)) - origin_field: - for { - t, err := keyDec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return fmt.Errorf("%s | %w", "Origin", err) - } - - switch t { - - case "lat", "lon": - o := NewLatLonGeoLocation() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); 
err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - s.Origin = o - break origin_field - - case "geohash": - o := NewGeoHashLocation() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - s.Origin = o - break origin_field - - } - } - if s.Origin == nil { - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&s.Origin); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - } - - case "pivot": - if err := dec.Decode(&s.Pivot); err != nil { - return fmt.Errorf("%s | %w", "Pivot", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - } - } - return nil -} - -// NewDistanceFeatureQueryBaseGeoLocationDistance returns a DistanceFeatureQueryBaseGeoLocationDistance. -func NewDistanceFeatureQueryBaseGeoLocationDistance() *DistanceFeatureQueryBaseGeoLocationDistance { - r := &DistanceFeatureQueryBaseGeoLocationDistance{} - - return r -} diff --git a/typedapi/types/diversifiedsampleraggregation.go b/typedapi/types/diversifiedsampleraggregation.go index 1c31db3b65..a92f3b3323 100644 --- a/typedapi/types/diversifiedsampleraggregation.go +++ b/typedapi/types/diversifiedsampleraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // DiversifiedSamplerAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L333-L357 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L333-L357 type DiversifiedSamplerAggregation struct { // ExecutionHint The type of value used for de-duplication. ExecutionHint *sampleraggregationexecutionhint.SamplerAggregationExecutionHint `json:"execution_hint,omitempty"` @@ -120,3 +120,13 @@ func NewDiversifiedSamplerAggregation() *DiversifiedSamplerAggregation { return r } + +// true + +type DiversifiedSamplerAggregationVariant interface { + DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation +} + +func (s *DiversifiedSamplerAggregation) DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation { + return s +} diff --git a/typedapi/types/docstats.go b/typedapi/types/docstats.go index a31f917809..ed2b1c7e3b 100644 --- a/typedapi/types/docstats.go +++ b/typedapi/types/docstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DocStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L97-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L100-L112 type DocStats struct { // Count Total number of non-deleted documents across all primary shards assigned to // selected nodes. 
@@ -102,3 +102,5 @@ func NewDocStats() *DocStats { return r } + +// false diff --git a/typedapi/types/document.go b/typedapi/types/document.go index cbf99825f1..69f0213f8a 100644 --- a/typedapi/types/document.go +++ b/typedapi/types/document.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Document type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L62-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L62-L76 type Document struct { // Id_ Unique identifier for the document. // This ID must be unique within the `_index`. @@ -82,3 +82,13 @@ func NewDocument() *Document { return r } + +// true + +type DocumentVariant interface { + DocumentCaster() *Document +} + +func (s *Document) DocumentCaster() *Document { + return s +} diff --git a/typedapi/types/documentrating.go b/typedapi/types/documentrating.go index 9376fd20fd..4c01e49993 100644 --- a/typedapi/types/documentrating.go +++ b/typedapi/types/documentrating.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DocumentRating type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L119-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L119-L126 type DocumentRating struct { // Id_ The document ID. Id_ string `json:"_id"` @@ -94,3 +94,13 @@ func NewDocumentRating() *DocumentRating { return r } + +// true + +type DocumentRatingVariant interface { + DocumentRatingCaster() *DocumentRating +} + +func (s *DocumentRating) DocumentRatingCaster() *DocumentRating { + return s +} diff --git a/typedapi/types/documentsimulation.go b/typedapi/types/documentsimulation.go index 5283569989..30031df8d2 100644 --- a/typedapi/types/documentsimulation.go +++ b/typedapi/types/documentsimulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,15 +33,15 @@ import ( // DocumentSimulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L78-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L78-L108 type DocumentSimulation struct { DocumentSimulation map[string]string `json:"-"` // Id_ Unique identifier for the document. This ID must be unique within the // `_index`. Id_ string `json:"_id"` // Index_ Name of the index containing the document. 
- Index_ string `json:"_index"` - Ingest_ SimulateIngest `json:"_ingest"` + Index_ string `json:"_index"` + Ingest_ Ingest `json:"_ingest"` // Routing_ Value used to send the document to a specific primary shard. Routing_ *string `json:"_routing,omitempty"` // Source_ JSON body for the document. @@ -160,9 +160,11 @@ func (s DocumentSimulation) MarshalJSON() ([]byte, error) { // NewDocumentSimulation returns a DocumentSimulation. func NewDocumentSimulation() *DocumentSimulation { r := &DocumentSimulation{ - DocumentSimulation: make(map[string]string, 0), - Source_: make(map[string]json.RawMessage, 0), + DocumentSimulation: make(map[string]string), + Source_: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/dotexpanderprocessor.go b/typedapi/types/dotexpanderprocessor.go index ae322f07c2..8fb2c7907d 100644 --- a/typedapi/types/dotexpanderprocessor.go +++ b/typedapi/types/dotexpanderprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DotExpanderProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L785-L803 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L826-L844 type DotExpanderProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -40,7 +40,7 @@ type DotExpanderProcessor struct { // If set to `*`, all top-level fields will be expanded. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -94,16 +94,9 @@ func (s *DotExpanderProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -173,3 +166,13 @@ func NewDotExpanderProcessor() *DotExpanderProcessor { return r } + +// true + +type DotExpanderProcessorVariant interface { + DotExpanderProcessorCaster() *DotExpanderProcessor +} + +func (s *DotExpanderProcessor) DotExpanderProcessorCaster() *DotExpanderProcessor { + return s +} diff --git a/typedapi/types/doublenumberproperty.go b/typedapi/types/doublenumberproperty.go index ae4017f3a6..dbff287aeb 100644 --- a/typedapi/types/doublenumberproperty.go +++ b/typedapi/types/doublenumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // DoubleNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L152-L155 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L156-L159 type DoubleNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type DoubleNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -553,301 +567,313 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -872,6 +898,11 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -920,6 +951,7 @@ func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -933,10 +965,20 @@ func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { // NewDoubleNumberProperty returns a DoubleNumberProperty. func NewDoubleNumberProperty() *DoubleNumberProperty { r := &DoubleNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DoubleNumberPropertyVariant interface { + DoubleNumberPropertyCaster() *DoubleNumberProperty +} + +func (s *DoubleNumberProperty) DoubleNumberPropertyCaster() *DoubleNumberProperty { + return s +} diff --git a/typedapi/types/doublerangeproperty.go b/typedapi/types/doublerangeproperty.go index d756cfe8a6..7a6f10f678 100644 --- a/typedapi/types/doublerangeproperty.go +++ b/typedapi/types/doublerangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DoubleRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L34-L36 type DoubleRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,10 +45,11 @@ type DoubleRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { @@ -149,301 +151,313 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -506,301 +520,313 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -820,6 +846,11 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -834,18 +865,19 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { func (s DoubleRangeProperty) MarshalJSON() ([]byte, error) { type innerDoubleRangeProperty DoubleRangeProperty tmp := innerDoubleRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "double_range" @@ -856,10 +888,20 @@ func (s DoubleRangeProperty) MarshalJSON() ([]byte, error) { // NewDoubleRangeProperty returns a DoubleRangeProperty. 
func NewDoubleRangeProperty() *DoubleRangeProperty { r := &DoubleRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DoubleRangePropertyVariant interface { + DoubleRangePropertyCaster() *DoubleRangeProperty +} + +func (s *DoubleRangeProperty) DoubleRangePropertyCaster() *DoubleRangeProperty { + return s +} diff --git a/typedapi/types/doubletermsaggregate.go b/typedapi/types/doubletermsaggregate.go index be73fbbaca..6f85a7573f 100644 --- a/typedapi/types/doubletermsaggregate.go +++ b/typedapi/types/doubletermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DoubleTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L451-L456 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L451-L456 type DoubleTermsAggregate struct { Buckets BucketsDoubleTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewDoubleTermsAggregate() *DoubleTermsAggregate { return r } + +// false diff --git a/typedapi/types/doubletermsbucket.go b/typedapi/types/doubletermsbucket.go index 56f3fa6a30..b9654a7e4f 100644 --- a/typedapi/types/doubletermsbucket.go +++ b/typedapi/types/doubletermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // DoubleTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L458-L461 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L458-L461 type DoubleTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -671,8 +671,10 @@ func (s DoubleTermsBucket) MarshalJSON() ([]byte, error) { // NewDoubleTermsBucket returns a DoubleTermsBucket. func NewDoubleTermsBucket() *DoubleTermsBucket { r := &DoubleTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/downsampleaction.go b/typedapi/types/downsampleaction.go index dc1dacaeb3..d00923b7de 100644 --- a/typedapi/types/downsampleaction.go +++ b/typedapi/types/downsampleaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DownsampleAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L112-L115 type DownsampleAction struct { FixedInterval string `json:"fixed_interval"` WaitTimeout Duration `json:"wait_timeout,omitempty"` @@ -72,3 +72,13 @@ func NewDownsampleAction() *DownsampleAction { return r } + +// true + +type DownsampleActionVariant interface { + DownsampleActionCaster() *DownsampleAction +} + +func (s *DownsampleAction) DownsampleActionCaster() *DownsampleAction { + return s +} diff --git a/typedapi/types/downsampleconfig.go b/typedapi/types/downsampleconfig.go index 3205d2fbd1..e27a25c7e4 100644 --- a/typedapi/types/downsampleconfig.go +++ b/typedapi/types/downsampleconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DownsampleConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/Downsample.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/Downsample.ts#L22-L27 type DownsampleConfig struct { // FixedInterval The interval at which to aggregate the original time series index. 
FixedInterval string `json:"fixed_interval"` @@ -67,3 +67,13 @@ func NewDownsampleConfig() *DownsampleConfig { return r } + +// true + +type DownsampleConfigVariant interface { + DownsampleConfigCaster() *DownsampleConfig +} + +func (s *DownsampleConfig) DownsampleConfigCaster() *DownsampleConfig { + return s +} diff --git a/typedapi/types/downsamplinground.go b/typedapi/types/downsamplinground.go index aec8e87bfb..098a492543 100644 --- a/typedapi/types/downsamplinground.go +++ b/typedapi/types/downsamplinground.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // DownsamplingRound type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DownsamplingRound.ts#L23-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DownsamplingRound.ts#L23-L32 type DownsamplingRound struct { // After The duration since rollover when this downsampling round should execute After Duration `json:"after"` @@ -74,3 +74,13 @@ func NewDownsamplingRound() *DownsamplingRound { return r } + +// true + +type DownsamplingRoundVariant interface { + DownsamplingRoundCaster() *DownsamplingRound +} + +func (s *DownsamplingRound) DownsamplingRoundCaster() *DownsamplingRound { + return s +} diff --git a/typedapi/types/dropprocessor.go b/typedapi/types/dropprocessor.go index 44870d3ed0..0688d450be 100644 --- a/typedapi/types/dropprocessor.go +++ b/typedapi/types/dropprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,13 @@ import ( // DropProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L805-L805 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L846-L846 type DropProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -75,16 +75,9 @@ func (s *DropProcessor) UnmarshalJSON(data []byte) error { s.Description = &o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -128,3 +121,13 @@ func NewDropProcessor() *DropProcessor { return r } + +// true + +type DropProcessorVariant interface { + DropProcessorCaster() *DropProcessor +} + +func (s *DropProcessor) DropProcessorCaster() *DropProcessor { + return s +} diff --git a/typedapi/types/duration.go b/typedapi/types/duration.go index e146d9e394..0e737f3c48 100644 --- a/typedapi/types/duration.go +++ b/typedapi/types/duration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -24,5 +24,9 @@ package types // // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L52-L58 type Duration any + +type DurationVariant interface { + DurationCaster() *Duration +} diff --git a/typedapi/types/durationvalueunitfloatmillis.go b/typedapi/types/durationvalueunitfloatmillis.go index 3fab04ac8a..9f06e846b8 100644 --- a/typedapi/types/durationvalueunitfloatmillis.go +++ b/typedapi/types/durationvalueunitfloatmillis.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DurationValueUnitFloatMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L67-L67 type DurationValueUnitFloatMillis Float64 diff --git a/typedapi/types/durationvalueunitmillis.go b/typedapi/types/durationvalueunitmillis.go index f4bb61e67d..d5beb5f2e7 100644 --- a/typedapi/types/durationvalueunitmillis.go +++ b/typedapi/types/durationvalueunitmillis.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DurationValueUnitMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L67-L67 type DurationValueUnitMillis int64 + +type DurationValueUnitMillisVariant interface { + DurationValueUnitMillisCaster() *DurationValueUnitMillis +} diff --git a/typedapi/types/durationvalueunitnanos.go b/typedapi/types/durationvalueunitnanos.go index aec7ca255a..9f837af46d 100644 --- a/typedapi/types/durationvalueunitnanos.go +++ b/typedapi/types/durationvalueunitnanos.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DurationValueUnitNanos type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L67-L67 type DurationValueUnitNanos int64 diff --git a/typedapi/types/durationvalueunitseconds.go b/typedapi/types/durationvalueunitseconds.go index 8161def23c..2369b2e847 100644 --- a/typedapi/types/durationvalueunitseconds.go +++ b/typedapi/types/durationvalueunitseconds.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // DurationValueUnitSeconds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L67-L67 type DurationValueUnitSeconds int64 diff --git a/typedapi/types/dutchanalyzer.go b/typedapi/types/dutchanalyzer.go index 092541e420..408bfb738a 100644 --- a/typedapi/types/dutchanalyzer.go +++ b/typedapi/types/dutchanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // DutchAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L134-L139 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L145-L150 type DutchAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewDutchAnalyzer() *DutchAnalyzer { return r } + +// true + +type DutchAnalyzerVariant interface { + DutchAnalyzerCaster() *DutchAnalyzer +} + +func (s *DutchAnalyzer) DutchAnalyzerCaster() *DutchAnalyzer { + return s +} diff --git a/typedapi/types/dynamicproperty.go b/typedapi/types/dynamicproperty.go index 36330c5c04..1923252e74 100644 --- a/typedapi/types/dynamicproperty.go +++ b/typedapi/types/dynamicproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,14 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // DynamicProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L302-L333 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L318-L349 type DynamicProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -57,20 +58,21 @@ type DynamicProperty struct { IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` Locale *string `json:"locale,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - NullValue FieldValue `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - PositionIncrementGap *int `json:"position_increment_gap,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue FieldValue `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string 
`json:"search_quote_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` } func (s *DynamicProperty) UnmarshalJSON(data []byte) error { @@ -212,301 +214,313 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -687,301 +701,313 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -1030,6 +1056,11 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -1082,6 +1113,7 @@ func (s DynamicProperty) MarshalJSON() ([]byte, error) { SearchAnalyzer: s.SearchAnalyzer, SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -1095,10 +1127,20 @@ func (s DynamicProperty) MarshalJSON() ([]byte, error) { // NewDynamicProperty returns a DynamicProperty. 
func NewDynamicProperty() *DynamicProperty { r := &DynamicProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DynamicPropertyVariant interface { + DynamicPropertyCaster() *DynamicProperty +} + +func (s *DynamicProperty) DynamicPropertyCaster() *DynamicProperty { + return s +} diff --git a/typedapi/types/dynamictemplate.go b/typedapi/types/dynamictemplate.go index 5eaa5e952a..8ad6ad7874 100644 --- a/typedapi/types/dynamictemplate.go +++ b/typedapi/types/dynamictemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,17 +32,18 @@ import ( // DynamicTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/dynamic-template.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/dynamic-template.ts#L23-L43 type DynamicTemplate struct { - Mapping Property `json:"mapping,omitempty"` - Match []string `json:"match,omitempty"` - MatchMappingType []string `json:"match_mapping_type,omitempty"` - MatchPattern *matchtype.MatchType `json:"match_pattern,omitempty"` - PathMatch []string `json:"path_match,omitempty"` - PathUnmatch []string `json:"path_unmatch,omitempty"` - Runtime Property `json:"runtime,omitempty"` - Unmatch []string `json:"unmatch,omitempty"` - UnmatchMappingType []string `json:"unmatch_mapping_type,omitempty"` + AdditionalDynamicTemplateProperty map[string]json.RawMessage `json:"-"` + Mapping Property `json:"mapping,omitempty"` + Match []string `json:"match,omitempty"` + MatchMappingType []string `json:"match_mapping_type,omitempty"` + MatchPattern *matchtype.MatchType `json:"match_pattern,omitempty"` + PathMatch []string `json:"path_match,omitempty"` + PathUnmatch []string `json:"path_unmatch,omitempty"` + Runtime *RuntimeField `json:"runtime,omitempty"` + Unmatch []string `json:"unmatch,omitempty"` + UnmatchMappingType []string `json:"unmatch_mapping_type,omitempty"` } func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { @@ -77,300 +78,312 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { case "binary": o := NewBinaryProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "binary", err) } s.Mapping = *o case "boolean": o := NewBooleanProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "boolean", err) } s.Mapping = *o case "{dynamic_type}": o := NewDynamicProperty() if err := localDec.Decode(&o); err != nil { - return err + 
return fmt.Errorf("%s | %w", "{dynamic_type}", err) } s.Mapping = *o case "join": o := NewJoinProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "join", err) } s.Mapping = *o case "keyword": o := NewKeywordProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Mapping = *o case "match_only_text": o := NewMatchOnlyTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "match_only_text", err) } s.Mapping = *o case "percolator": o := NewPercolatorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "percolator", err) } s.Mapping = *o case "rank_feature": o := NewRankFeatureProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "rank_feature", err) } s.Mapping = *o case "rank_features": o := NewRankFeaturesProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "rank_features", err) } s.Mapping = *o case "search_as_you_type": o := NewSearchAsYouTypeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "search_as_you_type", err) } s.Mapping = *o case "text": o := NewTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "text", err) } s.Mapping = *o case "version": o := NewVersionProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "version", err) } s.Mapping = *o case "wildcard": o := NewWildcardProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "wildcard", err) } s.Mapping = *o case "date_nanos": o := NewDateNanosProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date_nanos", err) } s.Mapping = *o case "date": o := NewDateProperty() if err := 
localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date", err) } s.Mapping = *o case "aggregate_metric_double": o := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "aggregate_metric_double", err) } s.Mapping = *o case "dense_vector": o := NewDenseVectorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dense_vector", err) } s.Mapping = *o case "flattened": o := NewFlattenedProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "flattened", err) } s.Mapping = *o case "nested": o := NewNestedProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nested", err) } s.Mapping = *o case "object": o := NewObjectProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "object", err) + } + s.Mapping = *o + case "passthrough": + o := NewPassthroughObjectProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "passthrough", err) } s.Mapping = *o case "semantic_text": o := NewSemanticTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "semantic_text", err) } s.Mapping = *o case "sparse_vector": o := NewSparseVectorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "sparse_vector", err) } s.Mapping = *o case "completion": o := NewCompletionProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "completion", err) } s.Mapping = *o case "constant_keyword": o := NewConstantKeywordProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "constant_keyword", err) + } + s.Mapping = *o + case "counted_keyword": + o := NewCountedKeywordProperty() + if err := localDec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "counted_keyword", err) } s.Mapping = *o case "alias": o := NewFieldAliasProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "alias", err) } s.Mapping = *o case "histogram": o := NewHistogramProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "histogram", err) } s.Mapping = *o case "ip": o := NewIpProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ip", err) } s.Mapping = *o case "murmur3": o := NewMurmur3HashProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "murmur3", err) } s.Mapping = *o case "token_count": o := NewTokenCountProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "token_count", err) } s.Mapping = *o case "geo_point": o := NewGeoPointProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "geo_point", err) } s.Mapping = *o case "geo_shape": o := NewGeoShapeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "geo_shape", err) } s.Mapping = *o case "point": o := NewPointProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "point", err) } s.Mapping = *o case "shape": o := NewShapeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shape", err) } s.Mapping = *o case "byte": o := NewByteNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "byte", err) } s.Mapping = *o case "double": o := NewDoubleNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "double", err) } s.Mapping = *o case "float": o := NewFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", 
"float", err) } s.Mapping = *o case "half_float": o := NewHalfFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "half_float", err) } s.Mapping = *o case "integer": o := NewIntegerNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "integer", err) } s.Mapping = *o case "long": o := NewLongNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "long", err) } s.Mapping = *o case "scaled_float": o := NewScaledFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "scaled_float", err) } s.Mapping = *o case "short": o := NewShortNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "short", err) } s.Mapping = *o case "unsigned_long": o := NewUnsignedLongNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unsigned_long", err) } s.Mapping = *o case "date_range": o := NewDateRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date_range", err) } s.Mapping = *o case "double_range": o := NewDoubleRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "double_range", err) } s.Mapping = *o case "float_range": o := NewFloatRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "float_range", err) } s.Mapping = *o case "integer_range": o := NewIntegerRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "integer_range", err) } s.Mapping = *o case "ip_range": o := NewIpRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ip_range", err) } s.Mapping = *o case "long_range": o := NewLongRangeProperty() if err := 
localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "long_range", err) } s.Mapping = *o case "icu_collation_keyword": o := NewIcuCollationProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation_keyword", err) } s.Mapping = *o default: if err := localDec.Decode(&s.Mapping); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } } @@ -444,317 +457,8 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { } case "runtime": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - kind := make(map[string]string, 0) - localDec := json.NewDecoder(source) - localDec.Decode(&kind) - source.Seek(0, io.SeekStart) - if _, ok := kind["type"]; !ok { - kind["type"] = "object" - } - switch kind["type"] { - - case "binary": - o := NewBinaryProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "boolean": - o := NewBooleanProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "{dynamic_type}": - o := NewDynamicProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "join": - o := NewJoinProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "keyword": - o := NewKeywordProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "match_only_text": - o := NewMatchOnlyTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "percolator": - o := NewPercolatorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "rank_feature": - o := NewRankFeatureProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "rank_features": - o := NewRankFeaturesProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - 
s.Runtime = *o - case "search_as_you_type": - o := NewSearchAsYouTypeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "text": - o := NewTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "version": - o := NewVersionProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "wildcard": - o := NewWildcardProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date_nanos": - o := NewDateNanosProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date": - o := NewDateProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "aggregate_metric_double": - o := NewAggregateMetricDoubleProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "dense_vector": - o := NewDenseVectorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "flattened": - o := NewFlattenedProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "nested": - o := NewNestedProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "object": - o := NewObjectProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "semantic_text": - o := NewSemanticTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "sparse_vector": - o := NewSparseVectorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "completion": - o := NewCompletionProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "constant_keyword": - o := NewConstantKeywordProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = 
*o - case "alias": - o := NewFieldAliasProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "histogram": - o := NewHistogramProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "ip": - o := NewIpProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "murmur3": - o := NewMurmur3HashProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "token_count": - o := NewTokenCountProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "geo_point": - o := NewGeoPointProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "geo_shape": - o := NewGeoShapeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "point": - o := NewPointProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "shape": - o := NewShapeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "byte": - o := NewByteNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "double": - o := NewDoubleNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "float": - o := NewFloatNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "half_float": - o := NewHalfFloatNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "integer": - o := NewIntegerNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "long": - o := NewLongNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "scaled_float": - o := NewScaledFloatNumberProperty() - if err := 
localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "short": - o := NewShortNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "unsigned_long": - o := NewUnsignedLongNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date_range": - o := NewDateRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "double_range": - o := NewDoubleRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "float_range": - o := NewFloatRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "integer_range": - o := NewIntegerRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "ip_range": - o := NewIpRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "long_range": - o := NewLongRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "icu_collation_keyword": - o := NewIcuCollationProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - default: - if err := localDec.Decode(&s.Runtime); err != nil { - return err - } + if err := dec.Decode(&s.Runtime); err != nil { + return fmt.Errorf("%s | %w", "Runtime", err) } case "unmatch": @@ -789,14 +493,68 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { } } + default: + + if key, ok := t.(string); ok { + if s.AdditionalDynamicTemplateProperty == nil { + s.AdditionalDynamicTemplateProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDynamicTemplateProperty", err) + } + s.AdditionalDynamicTemplateProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling 
for types with additional properties +func (s DynamicTemplate) MarshalJSON() ([]byte, error) { + type opt DynamicTemplate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDynamicTemplateProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDynamicTemplateProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDynamicTemplate returns a DynamicTemplate. func NewDynamicTemplate() *DynamicTemplate { - r := &DynamicTemplate{} + r := &DynamicTemplate{ + AdditionalDynamicTemplateProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DynamicTemplateVariant interface { + DynamicTemplateCaster() *DynamicTemplate +} + +func (s *DynamicTemplate) DynamicTemplateCaster() *DynamicTemplate { + return s +} diff --git a/typedapi/types/edgengramtokenfilter.go b/typedapi/types/edgengramtokenfilter.go index 12e959d9aa..9cd2ceb7e0 100644 --- a/typedapi/types/edgengramtokenfilter.go +++ b/typedapi/types/edgengramtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // EdgeNGramTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L78-L84 type EdgeNGramTokenFilter struct { MaxGram *int `json:"max_gram,omitempty"` MinGram *int `json:"min_gram,omitempty"` @@ -138,3 +138,13 @@ func NewEdgeNGramTokenFilter() *EdgeNGramTokenFilter { return r } + +// true + +type EdgeNGramTokenFilterVariant interface { + EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter +} + +func (s *EdgeNGramTokenFilter) EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter { + return s +} diff --git a/typedapi/types/edgengramtokenizer.go b/typedapi/types/edgengramtokenizer.go index 3dbc9c2b4e..f13c0c563f 100644 --- a/typedapi/types/edgengramtokenizer.go +++ b/typedapi/types/edgengramtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // EdgeNGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L48-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L48-L57 type EdgeNGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` MaxGram *int `json:"max_gram,omitempty"` @@ -145,3 +145,13 @@ func NewEdgeNGramTokenizer() *EdgeNGramTokenizer { return r } + +// true + +type EdgeNGramTokenizerVariant interface { + EdgeNGramTokenizerCaster() *EdgeNGramTokenizer +} + +func (s *EdgeNGramTokenizer) EdgeNGramTokenizerCaster() *EdgeNGramTokenizer { + return s +} diff --git a/typedapi/types/elasticsearchversioninfo.go b/typedapi/types/elasticsearchversioninfo.go index 0ce2641236..aae3593812 100644 --- a/typedapi/types/elasticsearchversioninfo.go +++ b/typedapi/types/elasticsearchversioninfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,17 +31,28 @@ import ( // ElasticsearchVersionInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Base.ts#L54-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Base.ts#L76-L115 type ElasticsearchVersionInfo struct { - BuildDate DateTime `json:"build_date"` - BuildFlavor string `json:"build_flavor"` - BuildHash string `json:"build_hash"` - BuildSnapshot bool `json:"build_snapshot"` - BuildType string `json:"build_type"` - Int string `json:"number"` - LuceneVersion string `json:"lucene_version"` - MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` - MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` + // BuildDate The Elasticsearch Git commit's date. + BuildDate DateTime `json:"build_date"` + // BuildFlavor The build flavor. For example, `default`. + BuildFlavor string `json:"build_flavor"` + // BuildHash The Elasticsearch Git commit's SHA hash. + BuildHash string `json:"build_hash"` + // BuildSnapshot Indicates whether the Elasticsearch build was a snapshot. + BuildSnapshot bool `json:"build_snapshot"` + // BuildType The build type that corresponds to how Elasticsearch was installed. + // For example, `docker`, `rpm`, or `tar`. + BuildType string `json:"build_type"` + // Int The Elasticsearch version number. + Int string `json:"number"` + // LuceneVersion The version number of Elasticsearch's underlying Lucene software. + LuceneVersion string `json:"lucene_version"` + // MinimumIndexCompatibilityVersion The minimum index version with which the responding node can read from disk. + MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` + // MinimumWireCompatibilityVersion The minimum node version with which the responding node can communicate. + // Also the minimum version from which you can perform a rolling upgrade. 
+ MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` } func (s *ElasticsearchVersionInfo) UnmarshalJSON(data []byte) error { @@ -152,3 +163,5 @@ func NewElasticsearchVersionInfo() *ElasticsearchVersionInfo { return r } + +// false diff --git a/typedapi/types/elasticsearchversionmininfo.go b/typedapi/types/elasticsearchversionmininfo.go index 1b93e75ed9..dc64a1923b 100644 --- a/typedapi/types/elasticsearchversionmininfo.go +++ b/typedapi/types/elasticsearchversionmininfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ElasticsearchVersionMinInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Base.ts#L66-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Base.ts#L117-L125 type ElasticsearchVersionMinInfo struct { BuildFlavor string `json:"build_flavor"` Int string `json:"number"` @@ -99,3 +99,5 @@ func NewElasticsearchVersionMinInfo() *ElasticsearchVersionMinInfo { return r } + +// false diff --git a/typedapi/types/elisiontokenfilter.go b/typedapi/types/elisiontokenfilter.go index fedf16d9f0..84dca5093d 100644 --- a/typedapi/types/elisiontokenfilter.go +++ b/typedapi/types/elisiontokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ElisionTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L188-L193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L188-L193 type ElisionTokenFilter struct { Articles []string `json:"articles,omitempty"` ArticlesCase Stringifiedboolean `json:"articles_case,omitempty"` @@ -114,3 +114,13 @@ func NewElisionTokenFilter() *ElisionTokenFilter { return r } + +// true + +type ElisionTokenFilterVariant interface { + ElisionTokenFilterCaster() *ElisionTokenFilter +} + +func (s *ElisionTokenFilter) ElisionTokenFilterCaster() *ElisionTokenFilter { + return s +} diff --git a/typedapi/types/email.go b/typedapi/types/email.go index 95dcd01d9f..a683ad65f8 100644 --- a/typedapi/types/email.go +++ b/typedapi/types/email.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Email type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L238-L250 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L238-L250 type Email struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -72,8 +72,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "bcc": - if err := dec.Decode(&s.Bcc); err != nil { - return fmt.Errorf("%s | %w", "Bcc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } } case "body": @@ -82,8 +93,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "cc": - if err := dec.Decode(&s.Cc); err != nil { - return fmt.Errorf("%s | %w", "Cc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } } case "from": @@ -109,8 +131,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "reply_to": - if err := dec.Decode(&s.ReplyTo); err != nil { - return fmt.Errorf("%s | %w", "ReplyTo", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } } case "sent_date": @@ -131,8 +164,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { s.Subject = o case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } } } @@ -143,8 +187,10 @@ func (s *Email) UnmarshalJSON(data []byte) error { // NewEmail returns a Email. func NewEmail() *Email { r := &Email{ - Attachments: make(map[string]EmailAttachmentContainer, 0), + Attachments: make(map[string]EmailAttachmentContainer), } return r } + +// false diff --git a/typedapi/types/emailaction.go b/typedapi/types/emailaction.go index 288b3310f5..48fcfe68e9 100644 --- a/typedapi/types/emailaction.go +++ b/typedapi/types/emailaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // EmailAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L252-L252 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L252-L252 type EmailAction struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -72,8 +72,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "bcc": - if err := dec.Decode(&s.Bcc); err != nil { - return fmt.Errorf("%s | %w", "Bcc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } } case "body": @@ -82,8 +93,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "cc": - if err := dec.Decode(&s.Cc); err != nil { - return fmt.Errorf("%s | %w", "Cc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } } case "from": @@ -109,8 +131,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "reply_to": - if err := dec.Decode(&s.ReplyTo); err != nil { - return fmt.Errorf("%s | %w", "ReplyTo", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } } case "sent_date": @@ -131,8 +164,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { s.Subject = o case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } } } @@ -143,8 +187,18 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { // NewEmailAction returns a EmailAction. func NewEmailAction() *EmailAction { r := &EmailAction{ - Attachments: make(map[string]EmailAttachmentContainer, 0), + Attachments: make(map[string]EmailAttachmentContainer), } return r } + +// true + +type EmailActionVariant interface { + EmailActionCaster() *EmailAction +} + +func (s *EmailAction) EmailActionCaster() *EmailAction { + return s +} diff --git a/typedapi/types/emailattachmentcontainer.go b/typedapi/types/emailattachmentcontainer.go index f06370ab50..0a51116134 100644 --- a/typedapi/types/emailattachmentcontainer.go +++ b/typedapi/types/emailattachmentcontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // EmailAttachmentContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L211-L216 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L211-L216 type EmailAttachmentContainer struct { - Data *DataEmailAttachment `json:"data,omitempty"` - Http *HttpEmailAttachment `json:"http,omitempty"` - Reporting *ReportingEmailAttachment `json:"reporting,omitempty"` + AdditionalEmailAttachmentContainerProperty map[string]json.RawMessage `json:"-"` + Data *DataEmailAttachment `json:"data,omitempty"` + Http *HttpEmailAttachment `json:"http,omitempty"` + Reporting *ReportingEmailAttachment `json:"reporting,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s EmailAttachmentContainer) MarshalJSON() ([]byte, error) { + type opt EmailAttachmentContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalEmailAttachmentContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalEmailAttachmentContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewEmailAttachmentContainer returns a EmailAttachmentContainer. 
func NewEmailAttachmentContainer() *EmailAttachmentContainer { - r := &EmailAttachmentContainer{} + r := &EmailAttachmentContainer{ + AdditionalEmailAttachmentContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type EmailAttachmentContainerVariant interface { + EmailAttachmentContainerCaster() *EmailAttachmentContainer +} + +func (s *EmailAttachmentContainer) EmailAttachmentContainerCaster() *EmailAttachmentContainer { + return s +} diff --git a/typedapi/types/emailbody.go b/typedapi/types/emailbody.go index 4b8391d925..2813d1a78e 100644 --- a/typedapi/types/emailbody.go +++ b/typedapi/types/emailbody.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EmailBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L192-L195 type EmailBody struct { Html *string `json:"html,omitempty"` Text *string `json:"text,omitempty"` @@ -87,3 +87,13 @@ func NewEmailBody() *EmailBody { return r } + +// true + +type EmailBodyVariant interface { + EmailBodyCaster() *EmailBody +} + +func (s *EmailBody) EmailBodyCaster() *EmailBody { + return s +} diff --git a/typedapi/types/emailresult.go b/typedapi/types/emailresult.go index cd7ecbd497..689dd26adf 100644 --- a/typedapi/types/emailresult.go +++ b/typedapi/types/emailresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EmailResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L205-L209 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L205-L209 type EmailResult struct { Account *string `json:"account,omitempty"` Message Email `json:"message"` @@ -93,3 +93,5 @@ func NewEmailResult() *EmailResult { return r } + +// false diff --git a/typedapi/types/emptyobject.go b/typedapi/types/emptyobject.go index d8c995a5c3..a6707689f0 100644 --- a/typedapi/types/emptyobject.go +++ b/typedapi/types/emptyobject.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EmptyObject type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L161-L162 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L157-L158 type EmptyObject struct { } @@ -32,3 +32,13 @@ func NewEmptyObject() *EmptyObject { return r } + +// true + +type EmptyObjectVariant interface { + EmptyObjectCaster() *EmptyObject +} + +func (s *EmptyObject) EmptyObjectCaster() *EmptyObject { + return s +} diff --git a/typedapi/types/englishanalyzer.go b/typedapi/types/englishanalyzer.go index fa41900e58..bc7ddad6e6 100644 --- a/typedapi/types/englishanalyzer.go +++ b/typedapi/types/englishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EnglishAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L141-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L152-L157 type EnglishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewEnglishAnalyzer() *EnglishAnalyzer { return r } + +// true + +type EnglishAnalyzerVariant interface { + EnglishAnalyzerCaster() *EnglishAnalyzer +} + +func (s *EnglishAnalyzer) EnglishAnalyzerCaster() *EnglishAnalyzer { + return s +} diff --git a/typedapi/types/enrichpolicy.go b/typedapi/types/enrichpolicy.go index 6d95a7cfb7..26b3422b00 100644 --- a/typedapi/types/enrichpolicy.go +++ b/typedapi/types/enrichpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EnrichPolicy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/_types/Policy.ts#L34-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/_types/Policy.ts#L34-L41 type EnrichPolicy struct { ElasticsearchVersion *string `json:"elasticsearch_version,omitempty"` EnrichFields []string `json:"enrich_fields"` @@ -126,3 +126,13 @@ func NewEnrichPolicy() *EnrichPolicy { return r } + +// true + +type EnrichPolicyVariant interface { + EnrichPolicyCaster() *EnrichPolicy +} + +func (s *EnrichPolicy) EnrichPolicyCaster() *EnrichPolicy { + return s +} diff --git a/typedapi/types/enrichprocessor.go b/typedapi/types/enrichprocessor.go index e3283967de..9fcc51cf15 100644 --- a/typedapi/types/enrichprocessor.go +++ b/typedapi/types/enrichprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // EnrichProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L807-L846 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L848-L887 type EnrichProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -43,7 +43,7 @@ type EnrichProcessor struct { // Supports template snippets. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -109,16 +109,9 @@ func (s *EnrichProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -228,3 +221,13 @@ func NewEnrichProcessor() *EnrichProcessor { return r } + +// true + +type EnrichProcessorVariant interface { + EnrichProcessorCaster() *EnrichProcessor +} + +func (s *EnrichProcessor) EnrichProcessorCaster() *EnrichProcessor { + return s +} diff --git a/typedapi/types/ensemble.go b/typedapi/types/ensemble.go index 2abc89f5dc..bd9e35cee4 100644 --- a/typedapi/types/ensemble.go +++ b/typedapi/types/ensemble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Ensemble type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L93-L99 type Ensemble struct { AggregateOutput *AggregateOutput `json:"aggregate_output,omitempty"` ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -98,3 +98,13 @@ func NewEnsemble() *Ensemble { return r } + +// true + +type EnsembleVariant interface { + EnsembleCaster() *Ensemble +} + +func (s *Ensemble) EnsembleCaster() *Ensemble { + return s +} diff --git a/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go b/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go index c6d6fe077e..0b087983b5 100644 --- a/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go +++ b/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package accesstokengranttype package accesstokengranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/types.ts#L23-L48 type AccessTokenGrantType struct { Name string } diff --git a/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go b/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go index 5cb06c1805..e33168ce86 100644 --- a/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go +++ b/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package acknowledgementoptions package acknowledgementoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L103-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L103-L107 type AcknowledgementOptions struct { Name string } diff --git a/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go b/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go index 6bf3b7c62e..178a0ee678 100644 --- a/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go +++ b/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package actionexecutionmode package actionexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L67-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L67-L88 type ActionExecutionMode struct { Name string } diff --git a/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go b/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go index ef3eae1221..2950881d7e 100644 --- a/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go +++ b/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package actionstatusoptions package actionstatusoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L96-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L96-L101 type ActionStatusOptions struct { Name string } diff --git a/typedapi/types/enums/actiontype/actiontype.go b/typedapi/types/enums/actiontype/actiontype.go index d549164188..cbcb3bd824 100644 --- a/typedapi/types/enums/actiontype/actiontype.go +++ b/typedapi/types/enums/actiontype/actiontype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package actiontype package actiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L58-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L58-L65 type ActionType struct { Name string } diff --git a/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go b/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go index bc183ef347..26c9d85df1 100644 --- a/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go +++ b/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package allocationexplaindecision package allocationexplaindecision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L33-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L33-L38 type AllocationExplainDecision struct { Name string } @@ -45,13 +45,13 @@ func (a AllocationExplainDecision) MarshalText() (text []byte, err error) { func (a *AllocationExplainDecision) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NO": + case "no": *a = NO - case "YES": + case "yes": *a = YES - case "THROTTLE": + case "throttle": *a = THROTTLE - case "ALWAYS": + case "always": *a = ALWAYS default: *a = AllocationExplainDecision{string(text)} diff --git a/typedapi/types/enums/apikeygranttype/apikeygranttype.go b/typedapi/types/enums/apikeygranttype/apikeygranttype.go index 7399b5e994..22e8d0df36 100644 --- a/typedapi/types/enums/apikeygranttype/apikeygranttype.go +++ b/typedapi/types/enums/apikeygranttype/apikeygranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package apikeygranttype package apikeygranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/grant_api_key/types.ts#L48-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/grant_api_key/types.ts#L47-L50 type ApiKeyGrantType struct { Name string } diff --git a/typedapi/types/enums/apikeytype/apikeytype.go b/typedapi/types/enums/apikeytype/apikeytype.go new file mode 100644 index 0000000000..6958a2c774 --- /dev/null +++ b/typedapi/types/enums/apikeytype/apikeytype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package apikeytype +package apikeytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/ApiKey.ts#L115-L118 +type ApiKeyType struct { + Name string +} + +var ( + Rest = ApiKeyType{"rest"} + + Crosscluster = ApiKeyType{"cross_cluster"} +) + +func (a ApiKeyType) MarshalText() (text []byte, err error) { + return []byte(a.String()), nil +} + +func (a *ApiKeyType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "rest": + *a = Rest + case "cross_cluster": + *a = Crosscluster + default: + *a = ApiKeyType{string(text)} + } + + return nil +} + +func (a ApiKeyType) String() string { + return a.Name +} diff --git a/typedapi/types/enums/appliesto/appliesto.go b/typedapi/types/enums/appliesto/appliesto.go index f2d00f5ac3..729c868067 100644 --- a/typedapi/types/enums/appliesto/appliesto.go +++ b/typedapi/types/enums/appliesto/appliesto.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package appliesto package appliesto import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Rule.ts#L67-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Rule.ts#L67-L72 type AppliesTo struct { Name string } diff --git a/typedapi/types/enums/boundaryscanner/boundaryscanner.go b/typedapi/types/enums/boundaryscanner/boundaryscanner.go index 2eb8565dd6..ff1706e946 100644 --- a/typedapi/types/enums/boundaryscanner/boundaryscanner.go +++ b/typedapi/types/enums/boundaryscanner/boundaryscanner.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package boundaryscanner package boundaryscanner import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L26-L45 type BoundaryScanner struct { Name string } diff --git a/typedapi/types/enums/bytes/bytes.go b/typedapi/types/enums/bytes/bytes.go index f25b899f50..4b95ff1b78 100644 --- a/typedapi/types/enums/bytes/bytes.go +++ b/typedapi/types/enums/bytes/bytes.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package bytes package bytes import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L170-L182 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L166-L178 type Bytes struct { Name string } diff --git a/typedapi/types/enums/calendarinterval/calendarinterval.go b/typedapi/types/enums/calendarinterval/calendarinterval.go index 07072bcc9a..3938198856 100644 --- a/typedapi/types/enums/calendarinterval/calendarinterval.go +++ b/typedapi/types/enums/calendarinterval/calendarinterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package calendarinterval package calendarinterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L262-L279 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L262-L279 type CalendarInterval struct { Name string } diff --git a/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go b/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go index eed2fbb497..5c3c519443 100644 --- a/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go +++ b/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go @@ 
-16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package cardinalityexecutionmode package cardinalityexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L64-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L64-L85 type CardinalityExecutionMode struct { Name string } diff --git a/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go b/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go index 8223c1e5c2..8a62cce84e 100644 --- a/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go +++ b/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package catanomalydetectorcolumn package catanomalydetectorcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L32-L401 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L32-L401 type CatAnomalyDetectorColumn struct { Name string } diff --git a/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go b/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go index ae3c5fbbd9..a2f6c26d7e 100644 --- a/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go +++ b/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package catdatafeedcolumn package catdatafeedcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L405-L471 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L405-L471 type CatDatafeedColumn struct { Name string } diff --git a/typedapi/types/enums/catdfacolumn/catdfacolumn.go b/typedapi/types/enums/catdfacolumn/catdfacolumn.go index c42d2eea30..a82c078094 100644 --- a/typedapi/types/enums/catdfacolumn/catdfacolumn.go +++ b/typedapi/types/enums/catdfacolumn/catdfacolumn.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package catdfacolumn package catdfacolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L472-L557 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L472-L557 type CatDfaColumn struct { Name string } diff --git a/typedapi/types/enums/categorizationstatus/categorizationstatus.go b/typedapi/types/enums/categorizationstatus/categorizationstatus.go index 49699e1eef..f29c7e37d0 100644 --- a/typedapi/types/enums/categorizationstatus/categorizationstatus.go +++ b/typedapi/types/enums/categorizationstatus/categorizationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package categorizationstatus package categorizationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L83-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L84-L87 type CategorizationStatus struct { Name string } diff --git a/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go b/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go index 4d549b6960..9ca1ea3516 100644 --- a/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go +++ b/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package cattrainedmodelscolumn package cattrainedmodelscolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L561-L635 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L561-L635 type CatTrainedModelsColumn struct { Name string } diff --git a/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go b/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go index fc78799467..03ec500a20 100644 --- a/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go +++ b/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package cattransformcolumn package cattransformcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/_types/CatBase.ts#L640-L844 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/_types/CatBase.ts#L640-L844 type CatTransformColumn struct { Name string } diff --git a/typedapi/types/enums/childscoremode/childscoremode.go b/typedapi/types/enums/childscoremode/childscoremode.go index ab484b6eca..efad6435c2 100644 --- a/typedapi/types/enums/childscoremode/childscoremode.go +++ b/typedapi/types/enums/childscoremode/childscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package childscoremode package childscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/joining.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/joining.ts#L25-L39 type ChildScoreMode struct { Name string } diff --git a/typedapi/types/enums/chunkingmode/chunkingmode.go b/typedapi/types/enums/chunkingmode/chunkingmode.go index 4bcba92050..ae7fe0b695 100644 --- a/typedapi/types/enums/chunkingmode/chunkingmode.go +++ b/typedapi/types/enums/chunkingmode/chunkingmode.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package chunkingmode package chunkingmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L235-L239 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L245-L249 type ChunkingMode struct { Name string } diff --git a/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go b/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go index 74c2bca4ba..42db0fc971 100644 --- a/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go +++ b/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package clusterinfotarget package clusterinfotarget import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L382-L388 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L378-L384 type ClusterInfoTarget struct { Name string } diff --git a/typedapi/types/enums/clusterprivilege/clusterprivilege.go b/typedapi/types/enums/clusterprivilege/clusterprivilege.go index d37e0d6c41..8d6e6f1493 100644 --- a/typedapi/types/enums/clusterprivilege/clusterprivilege.go +++ b/typedapi/types/enums/clusterprivilege/clusterprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package clusterprivilege package clusterprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L42-L195 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L42-L199 type ClusterPrivilege struct { Name string } @@ -117,6 +117,8 @@ var ( Monitorsnapshot = ClusterPrivilege{"monitor_snapshot"} + Monitorstats = ClusterPrivilege{"monitor_stats"} + Monitortextstructure = ClusterPrivilege{"monitor_text_structure"} Monitortransform = ClusterPrivilege{"monitor_transform"} @@ -241,6 +243,8 @@ func (c *ClusterPrivilege) UnmarshalText(text []byte) error { *c = Monitorrollup case "monitor_snapshot": *c = Monitorsnapshot + case "monitor_stats": + *c = Monitorstats case "monitor_text_structure": *c = Monitortextstructure case "monitor_transform": diff --git a/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go b/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go index 3252c5164d..c74f34e27a 100644 --- a/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go +++ b/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package clustersearchstatus package clustersearchstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L37-L43 type ClusterSearchStatus struct { Name string } diff --git a/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go b/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go index f84cefc052..4b24691aef 100644 --- a/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go +++ b/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package combinedfieldsoperator package combinedfieldsoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L512-L515 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L519-L522 type CombinedFieldsOperator struct { Name string } diff --git a/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go b/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go index ad468f801d..9ede326787 100644 --- a/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go +++ b/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package combinedfieldszeroterms package combinedfieldszeroterms import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L517-L526 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L524-L533 type CombinedFieldsZeroTerms struct { Name string } diff --git a/typedapi/types/enums/conditionop/conditionop.go b/typedapi/types/enums/conditionop/conditionop.go index 92ce3a93be..a3c9d61e82 100644 --- a/typedapi/types/enums/conditionop/conditionop.go +++ b/typedapi/types/enums/conditionop/conditionop.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package conditionop package conditionop import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L41-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L41-L48 type ConditionOp struct { Name string } diff --git a/typedapi/types/enums/conditionoperator/conditionoperator.go b/typedapi/types/enums/conditionoperator/conditionoperator.go index c8c70fa24d..b0b06fb468 100644 --- a/typedapi/types/enums/conditionoperator/conditionoperator.go +++ b/typedapi/types/enums/conditionoperator/conditionoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package conditionoperator package conditionoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Rule.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Rule.ts#L74-L79 type ConditionOperator struct { Name string } diff --git a/typedapi/types/enums/conditiontype/conditiontype.go b/typedapi/types/enums/conditiontype/conditiontype.go index bca4de4dce..2c6497e82a 100644 --- a/typedapi/types/enums/conditiontype/conditiontype.go +++ b/typedapi/types/enums/conditiontype/conditiontype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package conditiontype package conditiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L64-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L64-L70 type ConditionType struct { Name string } diff --git a/typedapi/types/enums/conflicts/conflicts.go b/typedapi/types/enums/conflicts/conflicts.go index 27c0295aa5..f313b28e31 100644 --- a/typedapi/types/enums/conflicts/conflicts.go +++ b/typedapi/types/enums/conflicts/conflicts.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package conflicts package conflicts import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L184-L193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L180-L189 type Conflicts struct { Name string } diff --git a/typedapi/types/enums/connectionscheme/connectionscheme.go b/typedapi/types/enums/connectionscheme/connectionscheme.go index 64405ba037..a975b31104 100644 --- a/typedapi/types/enums/connectionscheme/connectionscheme.go +++ b/typedapi/types/enums/connectionscheme/connectionscheme.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package connectionscheme package connectionscheme import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L39-L42 type ConnectionScheme struct { Name string } diff --git a/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go b/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go index 19d1ab4af1..38230ba622 100644 --- a/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go +++ b/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package connectorfieldtype package connectorfieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L43-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L43-L48 type ConnectorFieldType struct { Name string } diff --git a/typedapi/types/enums/connectorstatus/connectorstatus.go b/typedapi/types/enums/connectorstatus/connectorstatus.go index 275d6a731b..3d03a9fa6e 100644 --- a/typedapi/types/enums/connectorstatus/connectorstatus.go +++ b/typedapi/types/enums/connectorstatus/connectorstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package connectorstatus package connectorstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L130-L136 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L130-L136 type ConnectorStatus struct { Name string } diff --git a/typedapi/types/enums/converttype/converttype.go b/typedapi/types/enums/converttype/converttype.go index 9767d303fc..68499c7e3a 100644 --- a/typedapi/types/enums/converttype/converttype.go +++ b/typedapi/types/enums/converttype/converttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package converttype package converttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L621-L630 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L662-L671 type ConvertType struct { Name string } diff --git a/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go b/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go index 6207fafcf7..32758a7409 100644 --- a/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go +++ b/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dataattachmentformat package dataattachmentformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L187-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L187-L190 type DataAttachmentFormat struct { Name string } diff --git a/typedapi/types/enums/datafeedstate/datafeedstate.go b/typedapi/types/enums/datafeedstate/datafeedstate.go index 5475d3538b..5225de6def 100644 --- a/typedapi/types/enums/datafeedstate/datafeedstate.go +++ b/typedapi/types/enums/datafeedstate/datafeedstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package datafeedstate package datafeedstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L135-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L136-L141 type DatafeedState struct { Name string } diff --git a/typedapi/types/enums/dataframestate/dataframestate.go b/typedapi/types/enums/dataframestate/dataframestate.go index 8d92e1e4fb..456577a947 100644 --- a/typedapi/types/enums/dataframestate/dataframestate.go +++ b/typedapi/types/enums/dataframestate/dataframestate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dataframestate package dataframestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Dataframe.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Dataframe.ts#L20-L26 type DataframeState struct { Name string } diff --git a/typedapi/types/enums/day/day.go b/typedapi/types/enums/day/day.go index 078578f7ee..3aa7ce83f7 100644 --- a/typedapi/types/enums/day/day.go +++ b/typedapi/types/enums/day/day.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package day package day import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L37-L45 type Day struct { Name string } diff --git a/typedapi/types/enums/decision/decision.go b/typedapi/types/enums/decision/decision.go index 64930c68d4..e436962613 100644 --- a/typedapi/types/enums/decision/decision.go +++ b/typedapi/types/enums/decision/decision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package decision package decision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L92-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L92-L101 type Decision struct { Name string } diff --git a/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go b/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go index d1e7c3bd07..f7893b1c76 100644 --- a/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go +++ b/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package delimitedpayloadencoding package delimitedpayloadencoding import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L61-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L61-L65 type DelimitedPayloadEncoding struct { Name string } diff --git a/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go b/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go index 01fc0fe367..d9ef4bdf17 100644 --- a/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go +++ b/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package densevectorelementtype package densevectorelementtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/DenseVectorProperty.ts#L64-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/DenseVectorProperty.ts#L64-L80 type DenseVectorElementType struct { Name string } diff --git a/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go b/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go index d5e5bfe618..654b4d01a3 100644 --- a/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go +++ b/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package densevectorindexoptionstype package densevectorindexoptionstype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/DenseVectorProperty.ts#L164-L197 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/DenseVectorProperty.ts#L164-L197 type DenseVectorIndexOptionsType struct { Name string } diff --git a/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go b/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go index 9fb6846528..ae60e04561 100644 --- a/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go +++ b/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package densevectorsimilarity package densevectorsimilarity import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/DenseVectorProperty.ts#L82-L127 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/DenseVectorProperty.ts#L82-L127 type DenseVectorSimilarity struct { Name string } diff --git a/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go b/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go index ccc147649f..222029dce3 100644 --- a/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go +++ b/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package deploymentallocationstate package deploymentallocationstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L273-L286 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L330-L343 type DeploymentAllocationState struct { Name string } diff --git a/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go b/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go index bbb2f8fb30..7d282c0d0b 100644 --- a/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go +++ b/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package deploymentassignmentstate package deploymentassignmentstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L288-L305 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L345-L362 type DeploymentAssignmentState struct { Name string } diff --git a/typedapi/types/enums/deprecationlevel/deprecationlevel.go b/typedapi/types/enums/deprecationlevel/deprecationlevel.go index b23ab42869..d21b70fe31 100644 --- a/typedapi/types/enums/deprecationlevel/deprecationlevel.go +++ b/typedapi/types/enums/deprecationlevel/deprecationlevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package deprecationlevel package deprecationlevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/deprecations/types.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/deprecations/types.ts#L23-L30 type DeprecationLevel struct { Name string } diff --git a/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go b/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go index 1f4b7853c0..381c80382c 100644 --- a/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go +++ b/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dfiindependencemeasure package dfiindependencemeasure import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L20-L24 type DFIIndependenceMeasure struct { Name string } diff --git a/typedapi/types/enums/dfraftereffect/dfraftereffect.go b/typedapi/types/enums/dfraftereffect/dfraftereffect.go index 25d526bffa..dff831d4e1 100644 --- a/typedapi/types/enums/dfraftereffect/dfraftereffect.go +++ b/typedapi/types/enums/dfraftereffect/dfraftereffect.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dfraftereffect package dfraftereffect import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L26-L30 type DFRAfterEffect struct { Name string } diff --git a/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go b/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go index c35e25b9fa..3d23f2b1dd 100644 --- a/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go +++ b/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dfrbasicmodel package dfrbasicmodel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L32-L40 type DFRBasicModel struct { Name string } diff --git a/typedapi/types/enums/displaytype/displaytype.go b/typedapi/types/enums/displaytype/displaytype.go index 38057755bf..ddc01a8150 100644 --- a/typedapi/types/enums/displaytype/displaytype.go +++ b/typedapi/types/enums/displaytype/displaytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package displaytype package displaytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L35-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L35-L41 type DisplayType struct { Name string } diff --git a/typedapi/types/enums/distanceunit/distanceunit.go b/typedapi/types/enums/distanceunit/distanceunit.go index 893acdd858..da077e20c4 100644 --- a/typedapi/types/enums/distanceunit/distanceunit.go +++ b/typedapi/types/enums/distanceunit/distanceunit.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package distanceunit package distanceunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L30-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L30-L40 type DistanceUnit struct { Name string } diff --git a/typedapi/types/enums/dynamicmapping/dynamicmapping.go b/typedapi/types/enums/dynamicmapping/dynamicmapping.go index 3046cf245a..adf28e2aa8 100644 --- a/typedapi/types/enums/dynamicmapping/dynamicmapping.go +++ b/typedapi/types/enums/dynamicmapping/dynamicmapping.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package dynamicmapping package dynamicmapping import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/dynamic-template.ts#L49-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/dynamic-template.ts#L50-L59 type DynamicMapping struct { Name string } diff --git a/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go b/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go new file mode 100644 index 0000000000..e3df2806d4 --- /dev/null +++ b/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package ecscompatibilitytype +package ecscompatibilitytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/_types/Structure.ts#L40-L43 +type EcsCompatibilityType struct { + Name string +} + +var ( + Disabled = EcsCompatibilityType{"disabled"} + + V1 = EcsCompatibilityType{"v1"} +) + +func (e EcsCompatibilityType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EcsCompatibilityType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *e = Disabled + case "v1": + *e = V1 + default: + *e = EcsCompatibilityType{string(text)} + } + + return nil +} + +func (e EcsCompatibilityType) String() string { + return e.Name +} diff --git a/typedapi/types/enums/edgengramside/edgengramside.go b/typedapi/types/enums/edgengramside/edgengramside.go index c4d10c2bfa..31256f56bb 100644 --- a/typedapi/types/enums/edgengramside/edgengramside.go +++ b/typedapi/types/enums/edgengramside/edgengramside.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package edgengramside package edgengramside import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L73-L76 type EdgeNGramSide struct { Name string } diff --git a/typedapi/types/enums/emailpriority/emailpriority.go b/typedapi/types/enums/emailpriority/emailpriority.go index 3b78cf3ce4..5495577b0a 100644 --- a/typedapi/types/enums/emailpriority/emailpriority.go +++ b/typedapi/types/enums/emailpriority/emailpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package emailpriority package emailpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L197-L203 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L197-L203 type EmailPriority struct { Name string } diff --git a/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go b/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go index 39970c5ace..f4d673ea91 100644 --- a/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go +++ b/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package enrichpolicyphase package enrichpolicyphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/execute_policy/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/execute_policy/types.ts#L24-L29 type EnrichPolicyPhase struct { Name string } @@ -45,13 +45,13 @@ func (e EnrichPolicyPhase) MarshalText() (text []byte, err error) { func (e *EnrichPolicyPhase) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "SCHEDULED": + case "scheduled": *e = SCHEDULED - case "RUNNING": + case "running": *e = RUNNING - case "COMPLETE": + case "complete": *e = COMPLETE - case "FAILED": + case "failed": *e = FAILED default: *e = EnrichPolicyPhase{string(text)} diff --git a/typedapi/types/enums/esqlformat/esqlformat.go b/typedapi/types/enums/esqlformat/esqlformat.go index fa5f0a9656..a6b2ef9317 100644 --- a/typedapi/types/enums/esqlformat/esqlformat.go +++ b/typedapi/types/enums/esqlformat/esqlformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package esqlformat package esqlformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/query/QueryRequest.ts#L91-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/QueryParameters.ts#L20-L29 type EsqlFormat struct { Name string } diff --git a/typedapi/types/enums/eventtype/eventtype.go b/typedapi/types/enums/eventtype/eventtype.go new file mode 100644 index 0000000000..b8bee0c69e --- /dev/null +++ b/typedapi/types/enums/eventtype/eventtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package eventtype +package eventtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/AnalyticsEvent.ts#L22-L26 +type EventType struct { + Name string +} + +var ( + PageView = EventType{"page_view"} + + Search = EventType{"search"} + + SearchClick = EventType{"search_click"} +) + +func (e EventType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EventType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "page_view": + *e = PageView + case "search": + *e = Search + case "search_click": + *e = SearchClick + default: + *e = EventType{string(text)} + } + + return nil +} + +func (e EventType) String() string { + return e.Name +} diff --git a/typedapi/types/enums/excludefrequent/excludefrequent.go b/typedapi/types/enums/excludefrequent/excludefrequent.go index 2a2a2c2bee..057e043ef7 100644 --- a/typedapi/types/enums/excludefrequent/excludefrequent.go +++ b/typedapi/types/enums/excludefrequent/excludefrequent.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package excludefrequent package excludefrequent import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Detector.ts#L127-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Detector.ts#L145-L150 type ExcludeFrequent struct { Name string } diff --git a/typedapi/types/enums/executionphase/executionphase.go b/typedapi/types/enums/executionphase/executionphase.go index bc84a8cee7..484add3451 100644 --- a/typedapi/types/enums/executionphase/executionphase.go +++ b/typedapi/types/enums/executionphase/executionphase.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package executionphase package executionphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L49-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L49-L58 type ExecutionPhase struct { Name string } diff --git a/typedapi/types/enums/executionstatus/executionstatus.go b/typedapi/types/enums/executionstatus/executionstatus.go index c72442472a..51153a01bc 100644 --- a/typedapi/types/enums/executionstatus/executionstatus.go +++ b/typedapi/types/enums/executionstatus/executionstatus.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package executionstatus package executionstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L38-L47 type ExecutionStatus struct { Name string } diff --git a/typedapi/types/enums/expandwildcard/expandwildcard.go b/typedapi/types/enums/expandwildcard/expandwildcard.go index 183dd545c4..6386b17bb7 100644 --- a/typedapi/types/enums/expandwildcard/expandwildcard.go +++ b/typedapi/types/enums/expandwildcard/expandwildcard.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package expandwildcard package expandwildcard import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L202-L216 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L198-L212 type ExpandWildcard struct { Name string } diff --git a/typedapi/types/enums/failurestorestatus/failurestorestatus.go b/typedapi/types/enums/failurestorestatus/failurestorestatus.go new file mode 100644 index 0000000000..920b9b148b --- /dev/null +++ b/typedapi/types/enums/failurestorestatus/failurestorestatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package failurestorestatus +package failurestorestatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L86-L91 +type FailureStoreStatus struct { + Name string +} + +var ( + Notapplicableorunknown = FailureStoreStatus{"not_applicable_or_unknown"} + + Used = FailureStoreStatus{"used"} + + Notenabled = FailureStoreStatus{"not_enabled"} + + Failed = FailureStoreStatus{"failed"} +) + +func (f FailureStoreStatus) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f *FailureStoreStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "not_applicable_or_unknown": + *f = Notapplicableorunknown + case "used": + *f = Used + case "not_enabled": + *f = Notenabled + case "failed": + *f = Failed + default: + *f = FailureStoreStatus{string(text)} + } + + return nil +} + +func (f FailureStoreStatus) String() string { + return f.Name +} diff --git a/typedapi/types/enums/feature/feature.go b/typedapi/types/enums/feature/feature.go index 89ae237278..c69e629c86 100644 --- a/typedapi/types/enums/feature/feature.go +++ b/typedapi/types/enums/feature/feature.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package feature package feature import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get/IndicesGetRequest.ts#L91-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get/IndicesGetRequest.ts#L98-L102 type Feature struct { Name string } diff --git a/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go b/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go index 9d7cb43c14..4b0de4fee7 100644 --- a/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go +++ b/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package fieldsortnumerictype package fieldsortnumerictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L36-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L36-L41 type FieldSortNumericType struct { Name string } diff --git a/typedapi/types/enums/fieldtype/fieldtype.go b/typedapi/types/enums/fieldtype/fieldtype.go index 9020c3d649..396cc5d07f 100644 --- a/typedapi/types/enums/fieldtype/fieldtype.go +++ b/typedapi/types/enums/fieldtype/fieldtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package fieldtype package fieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/Property.ts#L166-L213 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/Property.ts#L191-L240 type FieldType struct { Name string } @@ -57,6 +57,8 @@ var ( Object = FieldType{"object"} + Passthrough = FieldType{"passthrough"} + Version = FieldType{"version"} Murmur3 = FieldType{"murmur3"} @@ -109,6 +111,8 @@ var ( Constantkeyword = FieldType{"constant_keyword"} + Countedkeyword = FieldType{"counted_keyword"} + Aggregatemetricdouble = FieldType{"aggregate_metric_double"} Densevector = FieldType{"dense_vector"} @@ -157,6 +161,8 @@ func (f *FieldType) UnmarshalText(text []byte) error { *f = Nested case "object": *f = Object + case "passthrough": + *f = Passthrough case "version": *f = Version case "murmur3": @@ -209,6 +215,8 @@ func (f *FieldType) UnmarshalText(text []byte) error { *f = Histogram case "constant_keyword": *f = Constantkeyword + case "counted_keyword": + *f = Countedkeyword case "aggregate_metric_double": *f = Aggregatemetricdouble case "dense_vector": diff --git a/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go b/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go index 20cef66747..fa69ef042a 100644 --- a/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go +++ b/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package fieldvaluefactormodifier package fieldvaluefactormodifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L323-L366 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L323-L366 type FieldValueFactorModifier struct { Name string } diff --git a/typedapi/types/enums/filteringpolicy/filteringpolicy.go b/typedapi/types/enums/filteringpolicy/filteringpolicy.go index 9046b2e3ca..e5f3cdd81e 100644 --- a/typedapi/types/enums/filteringpolicy/filteringpolicy.go +++ b/typedapi/types/enums/filteringpolicy/filteringpolicy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package filteringpolicy package filteringpolicy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L155-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L155-L158 type FilteringPolicy struct { Name string } diff --git a/typedapi/types/enums/filteringrulerule/filteringrulerule.go b/typedapi/types/enums/filteringrulerule/filteringrulerule.go index 229ce2243a..6f2097769f 100644 --- a/typedapi/types/enums/filteringrulerule/filteringrulerule.go +++ b/typedapi/types/enums/filteringrulerule/filteringrulerule.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package filteringrulerule package filteringrulerule import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L160-L168 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L160-L168 type FilteringRuleRule struct { Name string } diff --git a/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go b/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go index fa8a00dff1..29f530b004 100644 --- a/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go +++ b/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package filteringvalidationstate package filteringvalidationstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L186-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L186-L190 type FilteringValidationState struct { Name string } diff --git a/typedapi/types/enums/filtertype/filtertype.go b/typedapi/types/enums/filtertype/filtertype.go index 23881f397d..0004f75b2a 100644 --- a/typedapi/types/enums/filtertype/filtertype.go +++ b/typedapi/types/enums/filtertype/filtertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package filtertype package filtertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Filter.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Filter.ts#L43-L46 type FilterType struct { Name string } diff --git a/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go b/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go index dcca257726..d48160840d 100644 --- a/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go +++ b/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package fingerprintdigest package fingerprintdigest import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L856-L862 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L897-L903 type FingerprintDigest struct { Name string } @@ -47,15 +47,15 @@ func (f FingerprintDigest) MarshalText() (text []byte, err error) { func (f *FingerprintDigest) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "MD5": + case "md5": *f = Md5 - case "SHA-1": + case "sha-1": *f = Sha1 - case "SHA-256": + case "sha-256": *f = Sha256 - case "SHA-512": + case "sha-512": *f = Sha512 - case "MurmurHash3": + case "murmurhash3": *f = MurmurHash3 default: *f = FingerprintDigest{string(text)} diff --git a/typedapi/types/enums/followerindexstatus/followerindexstatus.go b/typedapi/types/enums/followerindexstatus/followerindexstatus.go index b8285b76ae..8d8a76a56c 100644 --- a/typedapi/types/enums/followerindexstatus/followerindexstatus.go +++ b/typedapi/types/enums/followerindexstatus/followerindexstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package followerindexstatus package followerindexstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow_info/types.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow_info/types.ts#L37-L40 type FollowerIndexStatus struct { Name string } diff --git a/typedapi/types/enums/formattype/formattype.go b/typedapi/types/enums/formattype/formattype.go new file mode 100644 index 0000000000..300f2e36f0 --- /dev/null +++ b/typedapi/types/enums/formattype/formattype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package formattype +package formattype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/_types/Structure.ts#L45-L50 +type FormatType struct { + Name string +} + +var ( + Delimited = FormatType{"delimited"} + + Ndjson = FormatType{"ndjson"} + + Semistructuredtext = FormatType{"semi_structured_text"} + + Xml = FormatType{"xml"} +) + +func (f FormatType) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f *FormatType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "delimited": + *f = Delimited + case "ndjson": + *f = Ndjson + case "semi_structured_text": + *f = Semistructuredtext + case "xml": + *f = Xml + default: + *f = FormatType{string(text)} + } + + return nil +} + +func (f FormatType) String() string { + return f.Name +} diff --git a/typedapi/types/enums/functionboostmode/functionboostmode.go b/typedapi/types/enums/functionboostmode/functionboostmode.go index d445b6787f..24809bafa3 100644 --- a/typedapi/types/enums/functionboostmode/functionboostmode.go +++ b/typedapi/types/enums/functionboostmode/functionboostmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package functionboostmode package functionboostmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L295-L321 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L295-L321 type FunctionBoostMode struct { Name string } diff --git a/typedapi/types/enums/functionscoremode/functionscoremode.go b/typedapi/types/enums/functionscoremode/functionscoremode.go index d9474531d8..b8825158dd 100644 --- a/typedapi/types/enums/functionscoremode/functionscoremode.go +++ b/typedapi/types/enums/functionscoremode/functionscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package functionscoremode package functionscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L268-L293 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L268-L293 type FunctionScoreMode struct { Name string } diff --git a/typedapi/types/enums/gappolicy/gappolicy.go b/typedapi/types/enums/gappolicy/gappolicy.go index 78918451fe..087bf227dc 100644 --- a/typedapi/types/enums/gappolicy/gappolicy.go +++ b/typedapi/types/enums/gappolicy/gappolicy.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package gappolicy package gappolicy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L61-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L61-L76 type GapPolicy struct { Name string } diff --git a/typedapi/types/enums/geodistancetype/geodistancetype.go b/typedapi/types/enums/geodistancetype/geodistancetype.go index a300951944..28eeb956ae 100644 --- a/typedapi/types/enums/geodistancetype/geodistancetype.go +++ b/typedapi/types/enums/geodistancetype/geodistancetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geodistancetype package geodistancetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L42-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L42-L51 type GeoDistanceType struct { Name string } diff --git a/typedapi/types/enums/geoexecution/geoexecution.go b/typedapi/types/enums/geoexecution/geoexecution.go index 4485cca2ac..a585c00a85 100644 --- a/typedapi/types/enums/geoexecution/geoexecution.go +++ b/typedapi/types/enums/geoexecution/geoexecution.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geoexecution package geoexecution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L59-L62 type GeoExecution struct { Name string } diff --git a/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go b/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go index 4919a5f625..f636f106e4 100644 --- a/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go +++ b/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geogridtargetformat package geogridtargetformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L433-L436 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L438-L441 type GeoGridTargetFormat struct { Name string } diff --git a/typedapi/types/enums/geogridtiletype/geogridtiletype.go b/typedapi/types/enums/geogridtiletype/geogridtiletype.go index e823bfc0c3..96bd7da46e 100644 --- a/typedapi/types/enums/geogridtiletype/geogridtiletype.go +++ b/typedapi/types/enums/geogridtiletype/geogridtiletype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geogridtiletype package geogridtiletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L427-L431 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L432-L436 type GeoGridTileType struct { Name string } diff --git a/typedapi/types/enums/geoorientation/geoorientation.go b/typedapi/types/enums/geoorientation/geoorientation.go index 8474cc26e1..0eb30fe406 100644 --- a/typedapi/types/enums/geoorientation/geoorientation.go +++ b/typedapi/types/enums/geoorientation/geoorientation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geoorientation package geoorientation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L34-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L34-L39 type GeoOrientation struct { Name string } diff --git a/typedapi/types/enums/geoshaperelation/geoshaperelation.go b/typedapi/types/enums/geoshaperelation/geoshaperelation.go index 30bc01f74b..d9dbb469e3 100644 --- a/typedapi/types/enums/geoshaperelation/geoshaperelation.go +++ b/typedapi/types/enums/geoshaperelation/geoshaperelation.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geoshaperelation package geoshaperelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L64-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L64-L82 type GeoShapeRelation struct { Name string } diff --git a/typedapi/types/enums/geostrategy/geostrategy.go b/typedapi/types/enums/geostrategy/geostrategy.go index 504bcb0a54..1906f1bb96 100644 --- a/typedapi/types/enums/geostrategy/geostrategy.go +++ b/typedapi/types/enums/geostrategy/geostrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geostrategy package geostrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L56-L59 type GeoStrategy struct { Name string } diff --git a/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go b/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go index b8a68e9774..74b76e2680 100644 --- a/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go +++ b/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package geovalidationmethod package geovalidationmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L163-L173 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L173-L183 type GeoValidationMethod struct { Name string } diff --git a/typedapi/types/enums/granttype/granttype.go b/typedapi/types/enums/granttype/granttype.go index e8042429b1..258a6fc4a9 100644 --- a/typedapi/types/enums/granttype/granttype.go +++ b/typedapi/types/enums/granttype/granttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package granttype package granttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/GrantType.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/GrantType.ts#L20-L30 type GrantType struct { Name string } diff --git a/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go b/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go index 01d47b5522..84d712c6d5 100644 --- a/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go +++ b/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package gridaggregationtype package gridaggregationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_mvt/_types/GridType.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_mvt/_types/GridType.ts#L30-L33 type GridAggregationType struct { Name string } diff --git a/typedapi/types/enums/gridtype/gridtype.go b/typedapi/types/enums/gridtype/gridtype.go index 958a129def..f10ddc6bab 100644 --- a/typedapi/types/enums/gridtype/gridtype.go +++ b/typedapi/types/enums/gridtype/gridtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package gridtype package gridtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_mvt/_types/GridType.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_mvt/_types/GridType.ts#L20-L28 type GridType struct { Name string } diff --git a/typedapi/types/enums/groupby/groupby.go b/typedapi/types/enums/groupby/groupby.go index 89d720c23c..04b984e76a 100644 --- a/typedapi/types/enums/groupby/groupby.go +++ b/typedapi/types/enums/groupby/groupby.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package groupby package groupby import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/_types/GroupBy.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/_types/GroupBy.ts#L20-L27 type GroupBy struct { Name string } diff --git a/typedapi/types/enums/healthstatus/healthstatus.go b/typedapi/types/enums/healthstatus/healthstatus.go index 998e9a5c78..570d959670 100644 --- a/typedapi/types/enums/healthstatus/healthstatus.go +++ b/typedapi/types/enums/healthstatus/healthstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package healthstatus package healthstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L220-L240 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L216-L236 type HealthStatus struct { Name string } diff --git a/typedapi/types/enums/highlighterencoder/highlighterencoder.go b/typedapi/types/enums/highlighterencoder/highlighterencoder.go index 339a4c498e..bcb99e6a03 100644 --- a/typedapi/types/enums/highlighterencoder/highlighterencoder.go +++ b/typedapi/types/enums/highlighterencoder/highlighterencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package highlighterencoder package highlighterencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L157-L160 type HighlighterEncoder struct { Name string } diff --git a/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go b/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go index 71fe36b331..dff732b1b5 100644 --- a/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go +++ b/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package highlighterfragmenter package highlighterfragmenter import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L162-L165 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L162-L165 type HighlighterFragmenter struct { Name string } diff --git a/typedapi/types/enums/highlighterorder/highlighterorder.go b/typedapi/types/enums/highlighterorder/highlighterorder.go index 2c1ef63397..e154951a3e 100644 --- a/typedapi/types/enums/highlighterorder/highlighterorder.go +++ b/typedapi/types/enums/highlighterorder/highlighterorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package highlighterorder package highlighterorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L167-L169 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L167-L169 type HighlighterOrder struct { Name string } diff --git a/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go b/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go index 6456cf526a..5d502080da 100644 --- a/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go +++ b/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package highlightertagsschema package highlightertagsschema import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L171-L173 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L171-L173 type HighlighterTagsSchema struct { Name string } diff --git a/typedapi/types/enums/highlightertype/highlightertype.go b/typedapi/types/enums/highlightertype/highlightertype.go index a86fe33965..a304fa8762 100644 --- a/typedapi/types/enums/highlightertype/highlightertype.go +++ b/typedapi/types/enums/highlightertype/highlightertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package highlightertype package highlightertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L175-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L175-L190 type HighlighterType struct { Name string } diff --git a/typedapi/types/enums/holtwinterstype/holtwinterstype.go b/typedapi/types/enums/holtwinterstype/holtwinterstype.go index 4b94f768d6..a379fb06bc 100644 --- a/typedapi/types/enums/holtwinterstype/holtwinterstype.go +++ b/typedapi/types/enums/holtwinterstype/holtwinterstype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package holtwinterstype package holtwinterstype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L309-L312 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L309-L312 type HoltWintersType struct { Name string } diff --git a/typedapi/types/enums/httpinputmethod/httpinputmethod.go b/typedapi/types/enums/httpinputmethod/httpinputmethod.go index 2ea712cb85..a7c841ce6c 100644 --- a/typedapi/types/enums/httpinputmethod/httpinputmethod.go +++ b/typedapi/types/enums/httpinputmethod/httpinputmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package httpinputmethod package httpinputmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L59-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L59-L65 type HttpInputMethod struct { Name string } diff --git a/typedapi/types/enums/ibdistribution/ibdistribution.go b/typedapi/types/enums/ibdistribution/ibdistribution.go index 706e09ce64..0f72ab0a1c 100644 --- a/typedapi/types/enums/ibdistribution/ibdistribution.go +++ b/typedapi/types/enums/ibdistribution/ibdistribution.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package ibdistribution package ibdistribution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L42-L45 type IBDistribution struct { Name string } diff --git a/typedapi/types/enums/iblambda/iblambda.go b/typedapi/types/enums/iblambda/iblambda.go index d74c0639ac..b30e42d932 100644 --- a/typedapi/types/enums/iblambda/iblambda.go +++ b/typedapi/types/enums/iblambda/iblambda.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package iblambda package iblambda import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L47-L50 type IBLambda struct { Name string } diff --git a/typedapi/types/enums/icucollationalternate/icucollationalternate.go b/typedapi/types/enums/icucollationalternate/icucollationalternate.go index 6967fcf7a3..c72d8b73b3 100644 --- a/typedapi/types/enums/icucollationalternate/icucollationalternate.go +++ b/typedapi/types/enums/icucollationalternate/icucollationalternate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icucollationalternate package icucollationalternate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L89-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L89-L92 type IcuCollationAlternate struct { Name string } diff --git a/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go b/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go index 73c670a626..9fc7fe7fbe 100644 --- a/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go +++ b/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icucollationcasefirst package icucollationcasefirst import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L94-L97 type IcuCollationCaseFirst struct { Name string } diff --git a/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go b/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go index c1bcae7ac6..7e2d0d7869 100644 --- a/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go +++ b/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icucollationdecomposition package icucollationdecomposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L99-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L99-L102 type IcuCollationDecomposition struct { Name string } diff --git a/typedapi/types/enums/icucollationstrength/icucollationstrength.go b/typedapi/types/enums/icucollationstrength/icucollationstrength.go index d920b18752..f6f7f9410b 100644 --- a/typedapi/types/enums/icucollationstrength/icucollationstrength.go +++ b/typedapi/types/enums/icucollationstrength/icucollationstrength.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icucollationstrength package icucollationstrength import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L104-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L104-L110 type IcuCollationStrength struct { Name string } diff --git a/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go b/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go index 2a75acf485..e73f5f9b18 100644 --- a/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go +++ b/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icunormalizationmode package icunormalizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L78-L81 type IcuNormalizationMode struct { Name string } diff --git a/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go b/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go index 889fe1a0ad..7b2fe1db6e 100644 --- a/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go +++ b/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icunormalizationtype package icunormalizationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L83-L87 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L83-L87 type IcuNormalizationType struct { Name string } diff --git a/typedapi/types/enums/icutransformdirection/icutransformdirection.go b/typedapi/types/enums/icutransformdirection/icutransformdirection.go index 548dfa0cc1..ef1b9130c4 100644 --- a/typedapi/types/enums/icutransformdirection/icutransformdirection.go +++ b/typedapi/types/enums/icutransformdirection/icutransformdirection.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package icutransformdirection package icutransformdirection import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L73-L76 type IcuTransformDirection struct { Name string } diff --git a/typedapi/types/enums/impactarea/impactarea.go b/typedapi/types/enums/impactarea/impactarea.go index 413d75c1b5..e16eadad9e 100644 --- a/typedapi/types/enums/impactarea/impactarea.go +++ b/typedapi/types/enums/impactarea/impactarea.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package impactarea package impactarea import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L73-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L74-L79 type ImpactArea struct { Name string } diff --git a/typedapi/types/enums/include/include.go b/typedapi/types/enums/include/include.go index f9d652cc30..57b45ec618 100644 --- a/typedapi/types/enums/include/include.go +++ b/typedapi/types/enums/include/include.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package include package include import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Include.ts#L20-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Include.ts#L20-L47 type Include struct { Name string } diff --git a/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go b/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go index 2116bc82a9..4007014ff5 100644 --- a/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go +++ b/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexcheckonstartup package indexcheckonstartup import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L262-L269 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L270-L277 type IndexCheckOnStartup struct { Name string } diff --git a/typedapi/types/enums/indexingjobstate/indexingjobstate.go b/typedapi/types/enums/indexingjobstate/indexingjobstate.go index 04ae35cd34..2f94a7c210 100644 --- a/typedapi/types/enums/indexingjobstate/indexingjobstate.go +++ b/typedapi/types/enums/indexingjobstate/indexingjobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexingjobstate package indexingjobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/types.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/types.ts#L77-L83 type IndexingJobState struct { Name string } diff --git a/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go b/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go index f1d798c2e0..0e16b67b10 100644 --- a/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go +++ b/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexmetadatastate package indexmetadatastate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L225-L232 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L225-L232 type IndexMetadataState struct { Name string } diff --git a/typedapi/types/enums/indexoptions/indexoptions.go b/typedapi/types/enums/indexoptions/indexoptions.go index 4dd01fc4db..eef7589e3b 100644 --- a/typedapi/types/enums/indexoptions/indexoptions.go +++ b/typedapi/types/enums/indexoptions/indexoptions.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexoptions package indexoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L258-L263 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L274-L279 type IndexOptions struct { Name string } diff --git a/typedapi/types/enums/indexprivilege/indexprivilege.go b/typedapi/types/enums/indexprivilege/indexprivilege.go index 16cdc63bc3..6885b3900f 100644 --- a/typedapi/types/enums/indexprivilege/indexprivilege.go +++ b/typedapi/types/enums/indexprivilege/indexprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexprivilege package indexprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L324-L366 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L386-L428 type IndexPrivilege struct { Name string } diff --git a/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go b/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go index 35bcde0b24..811085b51a 100644 --- a/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go +++ b/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexroutingallocationoptions package indexroutingallocationoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L38-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L38-L43 type IndexRoutingAllocationOptions struct { Name string } diff --git a/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go b/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go index 21d843f522..1dab3ffed2 100644 --- a/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go +++ b/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indexroutingrebalanceoptions package indexroutingrebalanceoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L45-L50 type IndexRoutingRebalanceOptions struct { Name string } diff --git a/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go b/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go index 0faa33481a..235d232e95 100644 --- a/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go +++ b/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indicatorhealthstatus package indicatorhealthstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L25-L30 type IndicatorHealthStatus struct { Name string } diff --git a/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go b/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go index b7bb3a2773..6a3e36968b 100644 --- a/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go +++ b/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package indicesblockoptions package indicesblockoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/add_block/IndicesAddBlockRequest.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/add_block/IndicesAddBlockRequest.ts#L91-L100 type IndicesBlockOptions struct { Name string } diff --git a/typedapi/types/enums/inputtype/inputtype.go b/typedapi/types/enums/inputtype/inputtype.go index 6b7485cd70..2a521576a3 100644 --- a/typedapi/types/enums/inputtype/inputtype.go +++ b/typedapi/types/enums/inputtype/inputtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package inputtype package inputtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L100-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L100-L104 type InputType struct { Name string } diff --git a/typedapi/types/enums/jobblockedreason/jobblockedreason.go b/typedapi/types/enums/jobblockedreason/jobblockedreason.go index 7392befde5..e5e0dd1bc2 100644 --- a/typedapi/types/enums/jobblockedreason/jobblockedreason.go +++ b/typedapi/types/enums/jobblockedreason/jobblockedreason.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package jobblockedreason package jobblockedreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L397-L401 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L397-L401 type JobBlockedReason struct { Name string } diff --git a/typedapi/types/enums/jobstate/jobstate.go b/typedapi/types/enums/jobstate/jobstate.go index 3835f2e359..4d07506cd0 100644 --- a/typedapi/types/enums/jobstate/jobstate.go +++ b/typedapi/types/enums/jobstate/jobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package jobstate package jobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L36-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L36-L52 type JobState struct { Name string } diff --git a/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go b/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go index a49d968898..38fc66934a 100644 --- a/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go +++ b/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package jsonprocessorconflictstrategy package jsonprocessorconflictstrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1111-L1116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1152-L1157 type JsonProcessorConflictStrategy struct { Name string } diff --git a/typedapi/types/enums/keeptypesmode/keeptypesmode.go b/typedapi/types/enums/keeptypesmode/keeptypesmode.go index 9a93d03fd7..d30df4c8f9 100644 --- a/typedapi/types/enums/keeptypesmode/keeptypesmode.go +++ b/typedapi/types/enums/keeptypesmode/keeptypesmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package keeptypesmode package keeptypesmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L214-L217 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L214-L217 type KeepTypesMode struct { Name string } diff --git a/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go b/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go index edf379c314..594f4e0b5d 100644 --- a/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go +++ b/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package kuromojitokenizationmode package kuromojitokenizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 type KuromojiTokenizationMode struct { Name string } diff --git a/typedapi/types/enums/language/language.go b/typedapi/types/enums/language/language.go deleted file mode 100644 index ba0c2bfdbc..0000000000 --- a/typedapi/types/enums/language/language.go +++ /dev/null @@ -1,185 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -// Package language -package language - -import "strings" - -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/languages.ts#L20-L55 -type Language struct { - Name string -} - -var ( - Arabic = Language{"Arabic"} - - Armenian = Language{"Armenian"} - - Basque = Language{"Basque"} - - Brazilian = Language{"Brazilian"} - - Bulgarian = Language{"Bulgarian"} - - Catalan = Language{"Catalan"} - - Chinese = Language{"Chinese"} - - Cjk = Language{"Cjk"} - - Czech = Language{"Czech"} - - Danish = Language{"Danish"} - - Dutch = Language{"Dutch"} - - English = Language{"English"} - - Estonian = Language{"Estonian"} - - Finnish = Language{"Finnish"} - - French = Language{"French"} - - Galician = Language{"Galician"} - - German = Language{"German"} - - Greek = Language{"Greek"} - - Hindi = Language{"Hindi"} - - Hungarian = Language{"Hungarian"} - - Indonesian = Language{"Indonesian"} - - Irish = Language{"Irish"} - - Italian = Language{"Italian"} - - Latvian = Language{"Latvian"} - - Norwegian = Language{"Norwegian"} - - Persian = Language{"Persian"} - - Portuguese = Language{"Portuguese"} - - Romanian = Language{"Romanian"} - - Russian = Language{"Russian"} - - Sorani = Language{"Sorani"} - - Spanish = Language{"Spanish"} - - Swedish = Language{"Swedish"} - - Turkish = Language{"Turkish"} - - Thai = Language{"Thai"} -) - -func (l Language) MarshalText() (text []byte, err error) { - return []byte(l.String()), nil -} - -func (l *Language) UnmarshalText(text []byte) error { - switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - - case "Arabic": - *l = Arabic - case "Armenian": - *l = Armenian - case "Basque": - *l = Basque - case "Brazilian": - *l = Brazilian - case "Bulgarian": - *l = Bulgarian - case "Catalan": - *l = Catalan - case "Chinese": - *l = Chinese - case "Cjk": 
- *l = Cjk - case "Czech": - *l = Czech - case "Danish": - *l = Danish - case "Dutch": - *l = Dutch - case "English": - *l = English - case "Estonian": - *l = Estonian - case "Finnish": - *l = Finnish - case "French": - *l = French - case "Galician": - *l = Galician - case "German": - *l = German - case "Greek": - *l = Greek - case "Hindi": - *l = Hindi - case "Hungarian": - *l = Hungarian - case "Indonesian": - *l = Indonesian - case "Irish": - *l = Irish - case "Italian": - *l = Italian - case "Latvian": - *l = Latvian - case "Norwegian": - *l = Norwegian - case "Persian": - *l = Persian - case "Portuguese": - *l = Portuguese - case "Romanian": - *l = Romanian - case "Russian": - *l = Russian - case "Sorani": - *l = Sorani - case "Spanish": - *l = Spanish - case "Swedish": - *l = Swedish - case "Turkish": - *l = Turkish - case "Thai": - *l = Thai - default: - *l = Language{string(text)} - } - - return nil -} - -func (l Language) String() string { - return l.Name -} diff --git a/typedapi/types/enums/level/level.go b/typedapi/types/enums/level/level.go index 7d17dc4017..4ce2dcdfff 100644 --- a/typedapi/types/enums/level/level.go +++ b/typedapi/types/enums/level/level.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package level package level import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L250-L254 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L246-L250 type Level struct { Name string } diff --git a/typedapi/types/enums/licensestatus/licensestatus.go b/typedapi/types/enums/licensestatus/licensestatus.go index 8c3d2fb908..c062ce9c6e 100644 --- a/typedapi/types/enums/licensestatus/licensestatus.go +++ b/typedapi/types/enums/licensestatus/licensestatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package licensestatus package licensestatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/_types/License.ts#L35-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/_types/License.ts#L35-L40 type LicenseStatus struct { Name string } diff --git a/typedapi/types/enums/licensetype/licensetype.go b/typedapi/types/enums/licensetype/licensetype.go index 4957b80178..e6922bae23 100644 --- a/typedapi/types/enums/licensetype/licensetype.go +++ b/typedapi/types/enums/licensetype/licensetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package licensetype package licensetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/_types/License.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/_types/License.ts#L23-L33 type LicenseType struct { Name string } diff --git a/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go b/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go index cd8e56e9fd..64f920f3df 100644 --- a/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go +++ b/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package lifecycleoperationmode package lifecycleoperationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Lifecycle.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Lifecycle.ts#L20-L24 type LifecycleOperationMode struct { Name string } @@ -43,11 +43,11 @@ func (l LifecycleOperationMode) MarshalText() (text []byte, err error) { func (l *LifecycleOperationMode) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "RUNNING": + case "running": *l = RUNNING - case "STOPPING": + case "stopping": *l = STOPPING - case "STOPPED": + case "stopped": *l = STOPPED default: *l = LifecycleOperationMode{string(text)} diff --git a/typedapi/types/enums/managedby/managedby.go b/typedapi/types/enums/managedby/managedby.go index 582bec1e46..b144c1eb89 100644 --- a/typedapi/types/enums/managedby/managedby.go +++ b/typedapi/types/enums/managedby/managedby.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package managedby package managedby import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L32-L37 type ManagedBy struct { Name string } @@ -43,11 +43,11 @@ func (m ManagedBy) MarshalText() (text []byte, err error) { func (m *ManagedBy) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "Index Lifecycle Management": + case "index lifecycle management": *m = Ilm - case "Data stream lifecycle": + case "data stream lifecycle": *m = Datastream - case "Unmanaged": + case "unmanaged": *m = Unmanaged default: *m = ManagedBy{string(text)} diff --git a/typedapi/types/enums/matchtype/matchtype.go b/typedapi/types/enums/matchtype/matchtype.go index 0871e3755c..b4b020ccac 100644 --- a/typedapi/types/enums/matchtype/matchtype.go +++ b/typedapi/types/enums/matchtype/matchtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package matchtype package matchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/dynamic-template.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/dynamic-template.ts#L45-L48 type MatchType struct { Name string } diff --git a/typedapi/types/enums/memorystatus/memorystatus.go b/typedapi/types/enums/memorystatus/memorystatus.go index 09c19904e0..2d4c2a4c50 100644 --- a/typedapi/types/enums/memorystatus/memorystatus.go +++ b/typedapi/types/enums/memorystatus/memorystatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package memorystatus package memorystatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L89-L93 type MemoryStatus struct { Name string } diff --git a/typedapi/types/enums/metric/metric.go b/typedapi/types/enums/metric/metric.go index 41c25456e0..45ce308285 100644 --- a/typedapi/types/enums/metric/metric.go +++ b/typedapi/types/enums/metric/metric.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package metric package metric import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Metric.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Metric.ts#L22-L28 type Metric struct { Name string } diff --git a/typedapi/types/enums/migrationstatus/migrationstatus.go b/typedapi/types/enums/migrationstatus/migrationstatus.go index dde4456bd6..7dfce5b924 100644 --- a/typedapi/types/enums/migrationstatus/migrationstatus.go +++ b/typedapi/types/enums/migrationstatus/migrationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package migrationstatus package migrationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 type MigrationStatus struct { Name string } @@ -45,13 +45,13 @@ func (m MigrationStatus) MarshalText() (text []byte, err error) { func (m *MigrationStatus) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NO_MIGRATION_NEEDED": + case "no_migration_needed": *m = 
NOMIGRATIONNEEDED - case "MIGRATION_NEEDED": + case "migration_needed": *m = MIGRATIONNEEDED - case "IN_PROGRESS": + case "in_progress": *m = INPROGRESS - case "ERROR": + case "error": *m = ERROR default: *m = MigrationStatus{string(text)} diff --git a/typedapi/types/enums/minimuminterval/minimuminterval.go b/typedapi/types/enums/minimuminterval/minimuminterval.go index fc93a218d3..a1b4c6134c 100644 --- a/typedapi/types/enums/minimuminterval/minimuminterval.go +++ b/typedapi/types/enums/minimuminterval/minimuminterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package minimuminterval package minimuminterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L112-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L112-L119 type MinimumInterval struct { Name string } diff --git a/typedapi/types/enums/missingorder/missingorder.go b/typedapi/types/enums/missingorder/missingorder.go index 82777f4d57..fda461089d 100644 --- a/typedapi/types/enums/missingorder/missingorder.go +++ b/typedapi/types/enums/missingorder/missingorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package missingorder package missingorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/AggregationContainer.ts#L536-L540 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/AggregationContainer.ts#L536-L540 type MissingOrder struct { Name string } diff --git a/typedapi/types/enums/modeenum/modeenum.go b/typedapi/types/enums/modeenum/modeenum.go new file mode 100644 index 0000000000..d4dd353289 --- /dev/null +++ b/typedapi/types/enums/modeenum/modeenum.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package modeenum +package modeenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L54-L56 +type ModeEnum struct { + Name string +} + +var ( + Upgrade = ModeEnum{"upgrade"} +) + +func (m ModeEnum) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *ModeEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "upgrade": + *m = Upgrade + default: + *m = ModeEnum{string(text)} + } + + return nil +} + +func (m ModeEnum) String() string { + return m.Name +} diff --git a/typedapi/types/enums/month/month.go b/typedapi/types/enums/month/month.go index 93e1e0fb78..f401d1f7e5 100644 --- a/typedapi/types/enums/month/month.go +++ b/typedapi/types/enums/month/month.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package month package month import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L65-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L65-L78 type Month struct { Name string } diff --git a/typedapi/types/enums/multivaluemode/multivaluemode.go b/typedapi/types/enums/multivaluemode/multivaluemode.go index 4325c6d6c8..f95650eeb4 100644 --- a/typedapi/types/enums/multivaluemode/multivaluemode.go +++ b/typedapi/types/enums/multivaluemode/multivaluemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package multivaluemode package multivaluemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L368-L385 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L368-L385 type MultiValueMode struct { Name string } diff --git a/typedapi/types/enums/noderole/noderole.go b/typedapi/types/enums/noderole/noderole.go index 40861fa5f9..e468983a9b 100644 --- a/typedapi/types/enums/noderole/noderole.go +++ b/typedapi/types/enums/noderole/noderole.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package noderole package noderole import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Node.ts#L71-L89 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Node.ts#L71-L89 type NodeRole struct { Name string } diff --git a/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go b/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go index b19a068b49..6517f819bb 100644 --- a/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go +++ b/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package noridecompoundmode package noridecompoundmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/nori-plugin.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/nori-plugin.ts#L22-L26 type NoriDecompoundMode struct { Name string } diff --git a/typedapi/types/enums/normalization/normalization.go b/typedapi/types/enums/normalization/normalization.go index 3a2d762428..490e1c5954 100644 --- a/typedapi/types/enums/normalization/normalization.go +++ b/typedapi/types/enums/normalization/normalization.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package normalization package normalization import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Similarity.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Similarity.ts#L52-L58 type Normalization struct { Name string } diff --git a/typedapi/types/enums/normalizemethod/normalizemethod.go b/typedapi/types/enums/normalizemethod/normalizemethod.go index 08564ee30b..e831490872 100644 --- a/typedapi/types/enums/normalizemethod/normalizemethod.go +++ b/typedapi/types/enums/normalizemethod/normalizemethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package normalizemethod package normalizemethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L361-L387 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L361-L387 type NormalizeMethod struct { Name string } diff --git a/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go b/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go index ef4d74ddbd..d18f3d23a8 100644 --- a/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go +++ b/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package numericfielddataformat package numericfielddataformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 type NumericFielddataFormat struct { Name string } diff --git a/typedapi/types/enums/onscripterror/onscripterror.go b/typedapi/types/enums/onscripterror/onscripterror.go index d374bdfc9b..26bbaa58b2 100644 --- a/typedapi/types/enums/onscripterror/onscripterror.go +++ b/typedapi/types/enums/onscripterror/onscripterror.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package onscripterror package onscripterror import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L137-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L141-L144 type OnScriptError struct { Name string } diff --git a/typedapi/types/enums/openaitasktype/openaitasktype.go b/typedapi/types/enums/openaitasktype/openaitasktype.go new file mode 100644 index 0000000000..04c107843b --- /dev/null +++ b/typedapi/types/enums/openaitasktype/openaitasktype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package openaitasktype +package openaitasktype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_openai/PutOpenAiRequest.ts#L84-L88 +type OpenAITaskType struct { + Name string +} + +var ( + Chatcompletion = OpenAITaskType{"chat_completion"} + + Completion = OpenAITaskType{"completion"} + + Textembedding = OpenAITaskType{"text_embedding"} +) + +func (o OpenAITaskType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OpenAITaskType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "chat_completion": + *o = Chatcompletion + case "completion": + *o = Completion + case "text_embedding": + *o = Textembedding + default: + *o = OpenAITaskType{string(text)} + } + + return nil +} + +func (o OpenAITaskType) String() string { + return o.Name +} diff --git a/typedapi/types/enums/operationtype/operationtype.go 
b/typedapi/types/enums/operationtype/operationtype.go index 04c96fe947..e0405a6521 100644 --- a/typedapi/types/enums/operationtype/operationtype.go +++ b/typedapi/types/enums/operationtype/operationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package operationtype package operationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L83-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L93-L98 type OperationType struct { Name string } diff --git a/typedapi/types/enums/operator/operator.go b/typedapi/types/enums/operator/operator.go index 8b9b9cfd2b..58db157880 100644 --- a/typedapi/types/enums/operator/operator.go +++ b/typedapi/types/enums/operator/operator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package operator package operator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/Operator.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/Operator.ts#L22-L27 type Operator struct { Name string } diff --git a/typedapi/types/enums/optype/optype.go b/typedapi/types/enums/optype/optype.go index 595bad338f..7233b51ff7 100644 --- a/typedapi/types/enums/optype/optype.go +++ b/typedapi/types/enums/optype/optype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package optype package optype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L256-L265 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L252-L261 type OpType struct { Name string } diff --git a/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go b/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go index e0ba772e97..ca10ffa31a 100644 --- a/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go +++ b/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package pagerdutycontexttype package pagerdutycontexttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L67-L70 type PagerDutyContextType struct { Name string } diff --git a/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go b/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go index 7a19ab4946..6e20e54199 100644 --- a/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go +++ b/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package pagerdutyeventtype package pagerdutyeventtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L72-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L72-L76 type PagerDutyEventType struct { Name string } diff --git a/typedapi/types/enums/painlesscontext/painlesscontext.go b/typedapi/types/enums/painlesscontext/painlesscontext.go new file mode 100644 index 0000000000..f77d873c02 --- /dev/null +++ b/typedapi/types/enums/painlesscontext/painlesscontext.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package painlesscontext +package painlesscontext + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scripts_painless_execute/types.ts#L57-L80 +type PainlessContext struct { + Name string +} + +var ( + Painlesstest = PainlessContext{"painless_test"} + + Filter = PainlessContext{"filter"} + + Score = PainlessContext{"score"} + + Booleanfield = PainlessContext{"boolean_field"} + + Datefield = PainlessContext{"date_field"} + + Doublefield = PainlessContext{"double_field"} + + Geopointfield = PainlessContext{"geo_point_field"} + + Ipfield = PainlessContext{"ip_field"} + + Keywordfield = PainlessContext{"keyword_field"} + + Longfield = PainlessContext{"long_field"} + + Compositefield = PainlessContext{"composite_field"} +) + +func (p PainlessContext) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PainlessContext) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "painless_test": + *p = Painlesstest + case "filter": + *p = Filter + case "score": + *p = Score + case "boolean_field": + *p = Booleanfield + case "date_field": + *p = Datefield + case "double_field": + *p = Doublefield + case "geo_point_field": + *p = Geopointfield + case "ip_field": + *p = Ipfield + case "keyword_field": + *p = Keywordfield + case "long_field": + *p = Longfield + case "composite_field": + *p = Compositefield + default: + *p = PainlessContext{string(text)} + } + + return nil +} + +func (p PainlessContext) String() string { + return p.Name +} diff --git a/typedapi/types/enums/phoneticencoder/phoneticencoder.go b/typedapi/types/enums/phoneticencoder/phoneticencoder.go index c8926b4715..42200350b7 100644 --- a/typedapi/types/enums/phoneticencoder/phoneticencoder.go +++ 
b/typedapi/types/enums/phoneticencoder/phoneticencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package phoneticencoder package phoneticencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/phonetic-plugin.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/phonetic-plugin.ts#L23-L36 type PhoneticEncoder struct { Name string } diff --git a/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go b/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go index fa06f37352..036047b382 100644 --- a/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go +++ b/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package phoneticlanguage package phoneticlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/phonetic-plugin.ts#L38-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/phonetic-plugin.ts#L38-L51 type PhoneticLanguage struct { Name string } diff --git a/typedapi/types/enums/phoneticnametype/phoneticnametype.go b/typedapi/types/enums/phoneticnametype/phoneticnametype.go index b8a238bf1a..38d8e59d76 100644 --- a/typedapi/types/enums/phoneticnametype/phoneticnametype.go +++ b/typedapi/types/enums/phoneticnametype/phoneticnametype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package phoneticnametype package phoneticnametype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/phonetic-plugin.ts#L53-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/phonetic-plugin.ts#L53-L57 type PhoneticNameType struct { Name string } diff --git a/typedapi/types/enums/phoneticruletype/phoneticruletype.go b/typedapi/types/enums/phoneticruletype/phoneticruletype.go index 7e0b2caabb..6d5bf721df 100644 --- a/typedapi/types/enums/phoneticruletype/phoneticruletype.go +++ b/typedapi/types/enums/phoneticruletype/phoneticruletype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package phoneticruletype package phoneticruletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/phonetic-plugin.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/phonetic-plugin.ts#L59-L62 type PhoneticRuleType struct { Name string } diff --git a/typedapi/types/enums/policytype/policytype.go b/typedapi/types/enums/policytype/policytype.go index 4771294ef4..e14e820568 100644 --- a/typedapi/types/enums/policytype/policytype.go +++ b/typedapi/types/enums/policytype/policytype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package policytype package policytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/_types/Policy.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/_types/Policy.ts#L28-L32 type PolicyType struct { Name string } diff --git a/typedapi/types/enums/quantifier/quantifier.go b/typedapi/types/enums/quantifier/quantifier.go index 4960c43f04..107da8e93f 100644 --- a/typedapi/types/enums/quantifier/quantifier.go +++ b/typedapi/types/enums/quantifier/quantifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package quantifier package quantifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L74-L77 type Quantifier struct { Name string } diff --git a/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go b/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go index 739174dc3c..f8ab9e3714 100644 --- a/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go +++ b/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package queryrulecriteriatype package queryrulecriteriatype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L55-L68 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/_types/QueryRuleset.ts#L95-L108 type QueryRuleCriteriaType struct { Name string } diff --git a/typedapi/types/enums/queryruletype/queryruletype.go b/typedapi/types/enums/queryruletype/queryruletype.go index 037af911ab..c8d933adcb 100644 --- a/typedapi/types/enums/queryruletype/queryruletype.go +++ b/typedapi/types/enums/queryruletype/queryruletype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package queryruletype package queryruletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/_types/QueryRuleset.ts#L60-L63 type QueryRuleType struct { Name string } diff --git a/typedapi/types/enums/rangerelation/rangerelation.go b/typedapi/types/enums/rangerelation/rangerelation.go index eb17912a8f..64e717623d 100644 --- a/typedapi/types/enums/rangerelation/rangerelation.go +++ b/typedapi/types/enums/rangerelation/rangerelation.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package rangerelation package rangerelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L188-L201 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L188-L201 type RangeRelation struct { Name string } diff --git a/typedapi/types/enums/ratemode/ratemode.go b/typedapi/types/enums/ratemode/ratemode.go index 9c229a2263..fd2cf19187 100644 --- a/typedapi/types/enums/ratemode/ratemode.go +++ b/typedapi/types/enums/ratemode/ratemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package ratemode package ratemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L252-L261 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L252-L261 type RateMode struct { Name string } diff --git a/typedapi/types/enums/refresh/refresh.go b/typedapi/types/enums/refresh/refresh.go index dd89cb47b0..7bdd028674 100644 --- a/typedapi/types/enums/refresh/refresh.go +++ b/typedapi/types/enums/refresh/refresh.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package refresh package refresh import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L267-L274 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L263-L270 type Refresh struct { Name string } diff --git a/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go b/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go new file mode 100644 index 0000000000..2609f11add --- /dev/null +++ b/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package remoteclusterprivilege +package remoteclusterprivilege + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L201-L213 +type RemoteClusterPrivilege struct { + Name string +} + +var ( + Monitorenrich = RemoteClusterPrivilege{"monitor_enrich"} + + Monitorstats = RemoteClusterPrivilege{"monitor_stats"} +) + +func (r RemoteClusterPrivilege) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RemoteClusterPrivilege) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "monitor_enrich": + *r = Monitorenrich + case "monitor_stats": + *r = Monitorstats + default: + *r = RemoteClusterPrivilege{string(text)} + } + + return nil +} + +func (r RemoteClusterPrivilege) String() string { + return r.Name +} diff --git a/typedapi/types/enums/responsecontenttype/responsecontenttype.go b/typedapi/types/enums/responsecontenttype/responsecontenttype.go index 13021fa872..0530a01b3e 100644 --- a/typedapi/types/enums/responsecontenttype/responsecontenttype.go +++ b/typedapi/types/enums/responsecontenttype/responsecontenttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package responsecontenttype package responsecontenttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L106-L110 type ResponseContentType struct { Name string } diff --git a/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go b/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go new file mode 100644 index 0000000000..9bded4a81e --- /dev/null +++ b/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package restrictionworkflow +package restrictionworkflow + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleDescriptor.ts#L143-L146 +type RestrictionWorkflow struct { + Name string +} + +var ( + Searchapplicationquery = RestrictionWorkflow{"search_application_query"} +) + +func (r RestrictionWorkflow) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RestrictionWorkflow) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "search_application_query": + *r = Searchapplicationquery + default: + *r = RestrictionWorkflow{string(text)} + } + + return nil +} + +func (r RestrictionWorkflow) String() string { + return r.Name +} diff --git a/typedapi/types/enums/result/result.go b/typedapi/types/enums/result/result.go index b0e4eb26a0..f5e06e1abf 100644 --- a/typedapi/types/enums/result/result.go +++ b/typedapi/types/enums/result/result.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package result package result import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Result.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Result.ts#L20-L26 type Result struct { Name string } diff --git a/typedapi/types/enums/resultposition/resultposition.go b/typedapi/types/enums/resultposition/resultposition.go index a6b2436501..54351a8284 100644 --- a/typedapi/types/enums/resultposition/resultposition.go +++ b/typedapi/types/enums/resultposition/resultposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package resultposition package resultposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/search/types.ts#L20-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/search/types.ts#L20-L32 type ResultPosition struct { Name string } diff --git a/typedapi/types/enums/routingstate/routingstate.go b/typedapi/types/enums/routingstate/routingstate.go index 23b10ac3d6..75f5e7ffae 100644 --- a/typedapi/types/enums/routingstate/routingstate.go +++ b/typedapi/types/enums/routingstate/routingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package routingstate package routingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L347-L368 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L407-L428 type RoutingState struct { Name string } diff --git a/typedapi/types/enums/ruleaction/ruleaction.go b/typedapi/types/enums/ruleaction/ruleaction.go index 76fba30475..2f37a02898 100644 --- a/typedapi/types/enums/ruleaction/ruleaction.go +++ b/typedapi/types/enums/ruleaction/ruleaction.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package ruleaction package ruleaction import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Rule.ts#L41-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Rule.ts#L41-L50 type RuleAction struct { Name string } diff --git a/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go b/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go index f7434ee763..6bdbc7150f 100644 --- a/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go +++ b/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package runtimefieldtype package runtimefieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/RuntimeFields.ts#L62-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/RuntimeFields.ts#L62-L72 type RuntimeFieldType struct { Name string } diff --git a/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go b/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go index 6d4b2adad7..94cc4a500f 100644 --- a/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go +++ b/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package sampleraggregationexecutionhint package sampleraggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L359-L372 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L359-L372 type SamplerAggregationExecutionHint struct { Name string } diff --git a/typedapi/types/enums/scoremode/scoremode.go b/typedapi/types/enums/scoremode/scoremode.go index 8b0f65e9e1..2cf789da11 100644 --- a/typedapi/types/enums/scoremode/scoremode.go +++ b/typedapi/types/enums/scoremode/scoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package scoremode package scoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/rescoring.ts#L64-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/rescoring.ts#L64-L86 type ScoreMode struct { Name string } diff --git a/typedapi/types/enums/scriptlanguage/scriptlanguage.go b/typedapi/types/enums/scriptlanguage/scriptlanguage.go index 3ef6ae3d7c..4e22951fb5 100644 --- a/typedapi/types/enums/scriptlanguage/scriptlanguage.go +++ b/typedapi/types/enums/scriptlanguage/scriptlanguage.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package scriptlanguage package scriptlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Scripting.ts#L24-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Scripting.ts#L24-L45 type ScriptLanguage struct { Name string } diff --git a/typedapi/types/enums/scriptsorttype/scriptsorttype.go b/typedapi/types/enums/scriptsorttype/scriptsorttype.go index cd00288ac0..5ff4bf9b8d 100644 --- a/typedapi/types/enums/scriptsorttype/scriptsorttype.go +++ b/typedapi/types/enums/scriptsorttype/scriptsorttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package scriptsorttype package scriptsorttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L80-L84 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L80-L84 type ScriptSortType struct { Name string } diff --git a/typedapi/types/enums/searchtype/searchtype.go b/typedapi/types/enums/searchtype/searchtype.go index 46b7936e95..e0892d6054 100644 --- a/typedapi/types/enums/searchtype/searchtype.go +++ b/typedapi/types/enums/searchtype/searchtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package searchtype package searchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L276-L281 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L272-L277 type SearchType struct { Name string } diff --git a/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go b/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go index b86126f87f..18a1228d74 100644 --- a/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go +++ b/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package segmentsortmissing package segmentsortmissing import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSegmentSort.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSegmentSort.ts#L43-L46 type SegmentSortMissing struct { Name string } diff --git a/typedapi/types/enums/segmentsortmode/segmentsortmode.go b/typedapi/types/enums/segmentsortmode/segmentsortmode.go index 31ebfce282..625b6dac46 100644 --- a/typedapi/types/enums/segmentsortmode/segmentsortmode.go +++ b/typedapi/types/enums/segmentsortmode/segmentsortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package segmentsortmode package segmentsortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSegmentSort.ts#L36-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSegmentSort.ts#L36-L41 type SegmentSortMode struct { Name string } diff --git a/typedapi/types/enums/segmentsortorder/segmentsortorder.go b/typedapi/types/enums/segmentsortorder/segmentsortorder.go index 982edeb379..e9b3f2e0c7 100644 --- a/typedapi/types/enums/segmentsortorder/segmentsortorder.go +++ b/typedapi/types/enums/segmentsortorder/segmentsortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package segmentsortorder package segmentsortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSegmentSort.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSegmentSort.ts#L29-L34 type SegmentSortOrder struct { Name string } diff --git a/typedapi/types/enums/servicetype/servicetype.go b/typedapi/types/enums/servicetype/servicetype.go new file mode 100644 index 0000000000..20b6f3870c --- /dev/null +++ b/typedapi/types/enums/servicetype/servicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package servicetype +package servicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_watsonx/PutWatsonxRequest.ts#L76-L78 +type ServiceType struct { + Name string +} + +var ( + Watsonxai = ServiceType{"watsonxai"} +) + +func (s ServiceType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "watsonxai": + *s = Watsonxai + default: + *s = ServiceType{string(text)} + } + + return nil +} + +func (s ServiceType) String() string { + return s.Name +} diff --git a/typedapi/types/enums/shapetype/shapetype.go b/typedapi/types/enums/shapetype/shapetype.go index 8dd739423d..56c01b8e59 100644 --- a/typedapi/types/enums/shapetype/shapetype.go +++ b/typedapi/types/enums/shapetype/shapetype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shapetype package shapetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1433-L1436 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1474-L1477 type ShapeType struct { Name string } diff --git a/typedapi/types/enums/shardroutingstate/shardroutingstate.go b/typedapi/types/enums/shardroutingstate/shardroutingstate.go index 9bd240ce6c..4e430412ab 100644 --- a/typedapi/types/enums/shardroutingstate/shardroutingstate.go +++ b/typedapi/types/enums/shardroutingstate/shardroutingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shardroutingstate package shardroutingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L169-L174 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L169-L174 type ShardRoutingState struct { Name string } @@ -45,13 +45,13 @@ func (s ShardRoutingState) MarshalText() (text []byte, err error) { func (s *ShardRoutingState) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "UNASSIGNED": + case "unassigned": *s = UNASSIGNED - case "INITIALIZING": + case "initializing": *s = INITIALIZING - case "STARTED": + case "started": *s = STARTED - case "RELOCATING": + case "relocating": *s = RELOCATING default: *s = ShardRoutingState{string(text)} diff --git a/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go b/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go index 4d57f791cc..8237ffb5cf 100644 --- a/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go +++ b/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shardsstatsstage package shardsstatsstage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 type ShardsStatsStage struct { Name string } @@ -47,15 +47,15 @@ func (s ShardsStatsStage) MarshalText() (text []byte, err error) { func (s *ShardsStatsStage) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "DONE": + case "done": *s = DONE - case "FAILURE": + case "failure": *s = FAILURE - case "FINALIZE": + case "finalize": *s = FINALIZE - case "INIT": + case "init": *s = INIT - case "STARTED": + case "started": *s = STARTED default: *s = ShardsStatsStage{string(text)} diff --git a/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go b/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go index 03e6967418..4b8cfdab38 100644 --- a/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go +++ b/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shardstoreallocation package shardstoreallocation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L47-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L47-L51 type ShardStoreAllocation struct { Name string } diff --git a/typedapi/types/enums/shardstorestatus/shardstorestatus.go b/typedapi/types/enums/shardstorestatus/shardstorestatus.go index 6fef2de899..9fa73be190 100644 --- a/typedapi/types/enums/shardstorestatus/shardstorestatus.go +++ b/typedapi/types/enums/shardstorestatus/shardstorestatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shardstorestatus package shardstorestatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L62-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L62-L71 type ShardStoreStatus struct { Name string } diff --git a/typedapi/types/enums/shutdownstatus/shutdownstatus.go b/typedapi/types/enums/shutdownstatus/shutdownstatus.go index dda8923699..9510494922 100644 --- a/typedapi/types/enums/shutdownstatus/shutdownstatus.go +++ b/typedapi/types/enums/shutdownstatus/shutdownstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shutdownstatus package shutdownstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 type ShutdownStatus struct { Name string } diff --git a/typedapi/types/enums/shutdowntype/shutdowntype.go b/typedapi/types/enums/shutdowntype/shutdowntype.go index b792f9e07a..d3a4650901 100644 --- a/typedapi/types/enums/shutdowntype/shutdowntype.go +++ b/typedapi/types/enums/shutdowntype/shutdowntype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package shutdowntype package shutdowntype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 type ShutdownType struct { Name string } diff --git a/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go b/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go index 0fa08cf511..438ab70f3b 100644 --- a/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go +++ b/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package simplequerystringflag package simplequerystringflag import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L729-L784 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L729-L784 type SimpleQueryStringFlag struct { Name string } @@ -63,31 +63,31 @@ func (s SimpleQueryStringFlag) MarshalText() (text []byte, err error) { func (s *SimpleQueryStringFlag) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NONE": + case "none": *s = NONE - case "AND": + case "and": *s = AND - case "NOT": + case "not": *s = NOT - case "OR": + case "or": *s = OR - case "PREFIX": + case "prefix": *s = PREFIX - case "PHRASE": + case "phrase": *s = PHRASE - case "PRECEDENCE": + case "precedence": *s = PRECEDENCE - case "ESCAPE": + case "escape": *s = ESCAPE - case "WHITESPACE": + case "whitespace": *s = WHITESPACE - case "FUZZY": + case "fuzzy": *s = FUZZY - case "NEAR": + case "near": *s = NEAR - case "SLOP": + case "slop": *s = SLOP - case "ALL": + case "all": *s = ALL default: *s = SimpleQueryStringFlag{string(text)} diff --git a/typedapi/types/enums/slicescalculation/slicescalculation.go b/typedapi/types/enums/slicescalculation/slicescalculation.go index 89e9b187ba..a6bd65626b 100644 --- a/typedapi/types/enums/slicescalculation/slicescalculation.go +++ b/typedapi/types/enums/slicescalculation/slicescalculation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package slicescalculation package slicescalculation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L372-L380 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L368-L376 type SlicesCalculation struct { Name string } diff --git a/typedapi/types/enums/snapshotsort/snapshotsort.go b/typedapi/types/enums/snapshotsort/snapshotsort.go index de61f7613a..9a0ae1ea28 100644 --- a/typedapi/types/enums/snapshotsort/snapshotsort.go +++ b/typedapi/types/enums/snapshotsort/snapshotsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package snapshotsort package snapshotsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 type SnapshotSort struct { Name string } diff --git a/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go b/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go index 5153177934..0f91466973 100644 --- a/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go +++ b/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package snapshotupgradestate package snapshotupgradestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L94-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L95-L100 type SnapshotUpgradeState struct { Name string } diff --git a/typedapi/types/enums/snowballlanguage/snowballlanguage.go b/typedapi/types/enums/snowballlanguage/snowballlanguage.go index 8b8bc21a89..d128fb4bb4 100644 --- a/typedapi/types/enums/snowballlanguage/snowballlanguage.go +++ b/typedapi/types/enums/snowballlanguage/snowballlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package snowballlanguage package snowballlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/languages.ts#L57-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/languages.ts#L20-L43 type SnowballLanguage struct { Name string } @@ -81,49 +81,49 @@ func (s SnowballLanguage) MarshalText() (text []byte, err error) { func (s *SnowballLanguage) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "Armenian": + case "armenian": *s = Armenian - case "Basque": + case "basque": *s = Basque - case "Catalan": + case "catalan": *s = Catalan - case "Danish": + case "danish": *s = Danish - case "Dutch": + case "dutch": *s = Dutch - case "English": + case "english": *s = English - case "Finnish": + case "finnish": *s = Finnish - case "French": + case "french": *s = French - case "German": + case "german": *s = German - case "German2": + case "german2": *s = German2 - case "Hungarian": + case "hungarian": *s = Hungarian - case "Italian": + case "italian": *s = Italian - case "Kp": + case "kp": *s = Kp - case "Lovins": + case "lovins": *s = Lovins - case "Norwegian": + case "norwegian": *s = Norwegian - case "Porter": + case "porter": *s = Porter - case "Portuguese": + case "portuguese": *s = Portuguese - case "Romanian": + case "romanian": *s = Romanian - case "Russian": + case "russian": *s = Russian - case "Spanish": + case "spanish": *s = Spanish - case "Swedish": + case "swedish": *s = Swedish - case "Turkish": + case "turkish": *s = Turkish default: *s = SnowballLanguage{string(text)} diff --git 
a/typedapi/types/enums/sortmode/sortmode.go b/typedapi/types/enums/sortmode/sortmode.go index 72b66af3fc..900f23546d 100644 --- a/typedapi/types/enums/sortmode/sortmode.go +++ b/typedapi/types/enums/sortmode/sortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package sortmode package sortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L108-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L108-L117 type SortMode struct { Name string } diff --git a/typedapi/types/enums/sortorder/sortorder.go b/typedapi/types/enums/sortorder/sortorder.go index 12332a6227..9b039874e2 100644 --- a/typedapi/types/enums/sortorder/sortorder.go +++ b/typedapi/types/enums/sortorder/sortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package sortorder package sortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L119-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L119-L128 type SortOrder struct { Name string } diff --git a/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go b/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go index 4d3e016c75..45ebb7f7a0 100644 --- a/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go +++ b/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package sourcefieldmode package sourcefieldmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L67-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L67-L75 type SourceFieldMode struct { Name string } diff --git a/typedapi/types/enums/sourcemode/sourcemode.go b/typedapi/types/enums/sourcemode/sourcemode.go new file mode 100644 index 0000000000..c98f0cc175 --- /dev/null +++ b/typedapi/types/enums/sourcemode/sourcemode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package sourcemode +package sourcemode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L509-L513 +type SourceMode struct { + Name string +} + +var ( + Disabled = SourceMode{"disabled"} + + Stored = SourceMode{"stored"} + + Synthetic = SourceMode{"synthetic"} +) + +func (s SourceMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SourceMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *s = Disabled + case "stored": + *s = Stored + case "synthetic": + *s = Synthetic + default: + *s = SourceMode{string(text)} + } + + return nil +} + +func (s SourceMode) String() string { + return s.Name +} diff --git a/typedapi/types/enums/sqlformat/sqlformat.go b/typedapi/types/enums/sqlformat/sqlformat.go index 48022b4f9a..d2a340f22a 100644 --- a/typedapi/types/enums/sqlformat/sqlformat.go +++ b/typedapi/types/enums/sqlformat/sqlformat.go @@ -16,14 +16,14 
@@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package sqlformat package sqlformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/sql/query/QuerySqlRequest.ts#L124-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/sql/query/QuerySqlRequest.ts#L154-L162 type SqlFormat struct { Name string } diff --git a/typedapi/types/enums/statslevel/statslevel.go b/typedapi/types/enums/statslevel/statslevel.go index 3324a8c2e9..a4273e63db 100644 --- a/typedapi/types/enums/statslevel/statslevel.go +++ b/typedapi/types/enums/statslevel/statslevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package statslevel package statslevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/_types/stats.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/_types/stats.ts#L20-L24 type StatsLevel struct { Name string } diff --git a/typedapi/types/enums/storagetype/storagetype.go b/typedapi/types/enums/storagetype/storagetype.go index 18053420e7..c302361be5 100644 --- a/typedapi/types/enums/storagetype/storagetype.go +++ b/typedapi/types/enums/storagetype/storagetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package storagetype package storagetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L520-L548 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L545-L573 type StorageType struct { Name string } diff --git a/typedapi/types/enums/stringdistance/stringdistance.go b/typedapi/types/enums/stringdistance/stringdistance.go index a018ac7aed..4dc25038f9 100644 --- a/typedapi/types/enums/stringdistance/stringdistance.go +++ b/typedapi/types/enums/stringdistance/stringdistance.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package stringdistance package stringdistance import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L472-L493 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L472-L493 type StringDistance struct { Name string } diff --git a/typedapi/types/enums/subobjects/subobjects.go b/typedapi/types/enums/subobjects/subobjects.go new file mode 100644 index 0000000000..b53b0793c5 --- /dev/null +++ b/typedapi/types/enums/subobjects/subobjects.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package subobjects +package subobjects + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/TypeMapping.ts#L63-L74 +type Subobjects struct { + Name string +} + +var ( + True = Subobjects{"true"} + + False = Subobjects{"false"} + + Auto = Subobjects{"auto"} +) + +func (s *Subobjects) UnmarshalJSON(data []byte) error { + return s.UnmarshalText(data) +} + +func (s Subobjects) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *Subobjects) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "true": + *s = True + case "false": + *s = False + case "auto": + *s = Auto + default: + *s = Subobjects{string(text)} + } + + return nil +} + +func (s Subobjects) String() string { + return s.Name +} diff --git a/typedapi/types/enums/suggestmode/suggestmode.go b/typedapi/types/enums/suggestmode/suggestmode.go index dc0e17068a..d736b2d9d3 100644 --- a/typedapi/types/enums/suggestmode/suggestmode.go +++ b/typedapi/types/enums/suggestmode/suggestmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package suggestmode package suggestmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L283-L296 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L279-L292 type SuggestMode struct { Name string } diff --git a/typedapi/types/enums/suggestsort/suggestsort.go b/typedapi/types/enums/suggestsort/suggestsort.go index 4869d46e7e..9a1da7820a 100644 --- a/typedapi/types/enums/suggestsort/suggestsort.go +++ b/typedapi/types/enums/suggestsort/suggestsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package suggestsort package suggestsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L495-L504 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L495-L504 type SuggestSort struct { Name string } diff --git a/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go b/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go index 5a44ad457e..a355372cc0 100644 --- a/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go +++ b/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package syncjobtriggermethod package syncjobtriggermethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/SyncJob.ts#L48-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/SyncJob.ts#L48-L51 type SyncJobTriggerMethod struct { Name string } diff --git a/typedapi/types/enums/syncjobtype/syncjobtype.go b/typedapi/types/enums/syncjobtype/syncjobtype.go index 152ffb2f99..30feed1675 100644 --- a/typedapi/types/enums/syncjobtype/syncjobtype.go +++ b/typedapi/types/enums/syncjobtype/syncjobtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package syncjobtype package syncjobtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/SyncJob.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/SyncJob.ts#L42-L46 type SyncJobType struct { Name string } diff --git a/typedapi/types/enums/syncstatus/syncstatus.go b/typedapi/types/enums/syncstatus/syncstatus.go index af94296b8d..f29f4aca88 100644 --- a/typedapi/types/enums/syncstatus/syncstatus.go +++ b/typedapi/types/enums/syncstatus/syncstatus.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package syncstatus package syncstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L138-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L138-L146 type SyncStatus struct { Name string } diff --git a/typedapi/types/enums/synonymformat/synonymformat.go b/typedapi/types/enums/synonymformat/synonymformat.go index 8541024b90..dd1c32bc0c 100644 --- a/typedapi/types/enums/synonymformat/synonymformat.go +++ b/typedapi/types/enums/synonymformat/synonymformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package synonymformat package synonymformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L104-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L104-L107 type SynonymFormat struct { Name string } diff --git a/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go b/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go new file mode 100644 index 0000000000..8c2e84d193 --- /dev/null +++ b/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package syntheticsourcekeepenum +package syntheticsourcekeepenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/Property.ts#L99-L117 +type SyntheticSourceKeepEnum struct { + Name string +} + +var ( + None = SyntheticSourceKeepEnum{"none"} + + Arrays = SyntheticSourceKeepEnum{"arrays"} + + All = SyntheticSourceKeepEnum{"all"} +) + +func (s SyntheticSourceKeepEnum) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyntheticSourceKeepEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *s = None + case "arrays": + *s = Arrays + case "all": + *s = All + default: + *s = SyntheticSourceKeepEnum{string(text)} + } + + return nil +} + +func (s SyntheticSourceKeepEnum) String() string { + return s.Name +} diff --git a/typedapi/types/enums/tasktype/tasktype.go b/typedapi/types/enums/tasktype/tasktype.go index d32356a98b..340fba0085 100644 --- a/typedapi/types/enums/tasktype/tasktype.go +++ b/typedapi/types/enums/tasktype/tasktype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package tasktype package tasktype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/TaskType.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/TaskType.ts#L20-L29 type TaskType struct { Name string } @@ -36,6 +36,8 @@ var ( Rerank = TaskType{"rerank"} Completion = TaskType{"completion"} + + Chatcompletion = TaskType{"chat_completion"} ) func (t TaskType) MarshalText() (text []byte, err error) { @@ -53,6 +55,8 @@ func (t *TaskType) UnmarshalText(text []byte) error { *t = Rerank case "completion": *t = Completion + case "chat_completion": + *t = Chatcompletion default: *t = TaskType{string(text)} } diff --git a/typedapi/types/enums/templateformat/templateformat.go b/typedapi/types/enums/templateformat/templateformat.go index 50ddfebec5..20d94a988b 100644 --- a/typedapi/types/enums/templateformat/templateformat.go +++ b/typedapi/types/enums/templateformat/templateformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package templateformat package templateformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleTemplate.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleTemplate.ts#L22-L25 type TemplateFormat struct { Name string } diff --git a/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go b/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go index 70483ad01a..97fb0f6af0 100644 --- a/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go +++ b/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package termsaggregationcollectmode package termsaggregationcollectmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1056-L1065 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1056-L1065 type TermsAggregationCollectMode struct { Name string } diff --git a/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go b/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go index 81daad4efb..828c3f13dc 100644 --- a/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go +++ b/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package termsaggregationexecutionhint package termsaggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1067-L1072 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1067-L1072 type TermsAggregationExecutionHint struct { Name string } diff --git a/typedapi/types/enums/termvectoroption/termvectoroption.go b/typedapi/types/enums/termvectoroption/termvectoroption.go index 0e1b897b80..33f80ddc46 100644 --- a/typedapi/types/enums/termvectoroption/termvectoroption.go +++ b/typedapi/types/enums/termvectoroption/termvectoroption.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package termvectoroption package termvectoroption import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/TermVectorOption.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/TermVectorOption.ts#L20-L28 type TermVectorOption struct { Name string } diff --git a/typedapi/types/enums/textquerytype/textquerytype.go b/typedapi/types/enums/textquerytype/textquerytype.go index 45300b3624..41251d658a 100644 --- a/typedapi/types/enums/textquerytype/textquerytype.go +++ b/typedapi/types/enums/textquerytype/textquerytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package textquerytype package textquerytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L559-L585 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L559-L585 type TextQueryType struct { Name string } diff --git a/typedapi/types/enums/threadtype/threadtype.go b/typedapi/types/enums/threadtype/threadtype.go index a94559e846..4d3ea0ec38 100644 --- a/typedapi/types/enums/threadtype/threadtype.go +++ b/typedapi/types/enums/threadtype/threadtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package threadtype package threadtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L298-L304 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L294-L300 type ThreadType struct { Name string } diff --git a/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go b/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go index 74b9e84670..a775686b6e 100644 --- a/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go +++ b/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package timeseriesmetrictype package timeseriesmetrictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26 type TimeSeriesMetricType struct { Name string } diff --git a/typedapi/types/enums/timeunit/timeunit.go b/typedapi/types/enums/timeunit/timeunit.go index c207d7268f..f632b989d6 100644 --- a/typedapi/types/enums/timeunit/timeunit.go +++ b/typedapi/types/enums/timeunit/timeunit.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package timeunit package timeunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L69-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L69-L77 type TimeUnit struct { Name string } diff --git a/typedapi/types/enums/tokenchar/tokenchar.go b/typedapi/types/enums/tokenchar/tokenchar.go index bbbb916c73..951e4c038f 100644 --- a/typedapi/types/enums/tokenchar/tokenchar.go +++ b/typedapi/types/enums/tokenchar/tokenchar.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package tokenchar package tokenchar import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L59-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L59-L66 type TokenChar struct { Name string } diff --git a/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go b/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go index 7febf888b0..2c98f48ac5 100644 --- a/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go +++ b/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package tokenizationtruncate package tokenizationtruncate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L350-L354 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L338-L342 type TokenizationTruncate struct { Name string } diff --git a/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go b/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go index 58b6ce0b3c..f5fad0ac00 100644 --- a/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go +++ b/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package totalhitsrelation package totalhitsrelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L100-L105 type TotalHitsRelation struct { Name string } diff --git a/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go b/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go index 21548f1aef..9e581cc9ee 100644 --- a/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go +++ b/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package trainedmodeltype package trainedmodeltype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L257-L271 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L314-L328 type TrainedModelType struct { Name string } diff --git a/typedapi/types/enums/trainingpriority/trainingpriority.go b/typedapi/types/enums/trainingpriority/trainingpriority.go index c1192ccdd9..4eee1550c4 100644 --- a/typedapi/types/enums/trainingpriority/trainingpriority.go +++ b/typedapi/types/enums/trainingpriority/trainingpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package trainingpriority package trainingpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L307-L310 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L364-L367 type TrainingPriority struct { Name string } diff --git a/typedapi/types/enums/translogdurability/translogdurability.go b/typedapi/types/enums/translogdurability/translogdurability.go index 9edb8e761f..e226150dec 100644 --- a/typedapi/types/enums/translogdurability/translogdurability.go +++ b/typedapi/types/enums/translogdurability/translogdurability.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package translogdurability package translogdurability import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L365-L380 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L379-L394 type TranslogDurability struct { Name string } diff --git a/typedapi/types/enums/ttesttype/ttesttype.go b/typedapi/types/enums/ttesttype/ttesttype.go index 3053be4224..5a4e8e7582 100644 --- a/typedapi/types/enums/ttesttype/ttesttype.go +++ b/typedapi/types/enums/ttesttype/ttesttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package ttesttype package ttesttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L331-L344 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L331-L344 type TTestType struct { Name string } diff --git a/typedapi/types/enums/type_/type_.go b/typedapi/types/enums/type_/type_.go index f6b06344ec..fe2ca71d2e 100644 --- a/typedapi/types/enums/type_/type_.go +++ b/typedapi/types/enums/type_/type_.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package type_ package type_ import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/_types/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/_types/types.ts#L20-L24 type Type struct { Name string } diff --git a/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go b/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go index c3a2c5c0c2..217236a5c6 100644 --- a/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go +++ b/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package unassignedinformationreason package unassignedinformationreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L138-L157 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L138-L157 type UnassignedInformationReason struct { Name string } @@ -67,35 +67,35 @@ func (u UnassignedInformationReason) MarshalText() (text []byte, err error) { func (u *UnassignedInformationReason) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "INDEX_CREATED": + case "index_created": *u = INDEXCREATED - case "CLUSTER_RECOVERED": + case "cluster_recovered": *u = CLUSTERRECOVERED - case "INDEX_REOPENED": + case "index_reopened": *u = INDEXREOPENED - case "DANGLING_INDEX_IMPORTED": + case "dangling_index_imported": *u = DANGLINGINDEXIMPORTED - case "NEW_INDEX_RESTORED": + case "new_index_restored": *u = NEWINDEXRESTORED - case "EXISTING_INDEX_RESTORED": + case "existing_index_restored": *u = EXISTINGINDEXRESTORED - case "REPLICA_ADDED": + case "replica_added": *u = REPLICAADDED - case "ALLOCATION_FAILED": + case "allocation_failed": *u = ALLOCATIONFAILED - case "NODE_LEFT": + case "node_left": *u = NODELEFT - case "REROUTE_CANCELLED": + case "reroute_cancelled": *u = REROUTECANCELLED - case "REINITIALIZED": + case "reinitialized": *u = REINITIALIZED - case "REALLOCATED_REPLICA": + case "reallocated_replica": *u = REALLOCATEDREPLICA - case "PRIMARY_FAILED": + case "primary_failed": *u = PRIMARYFAILED - case "FORCED_EMPTY_PRIMARY": + case "forced_empty_primary": *u = FORCEDEMPTYPRIMARY - case 
"MANUAL_ALLOCATION": + case "manual_allocation": *u = MANUALALLOCATION default: *u = UnassignedInformationReason{string(text)} diff --git a/typedapi/types/enums/useragentproperty/useragentproperty.go b/typedapi/types/enums/useragentproperty/useragentproperty.go index 51a6526ab9..1e1c151a3d 100644 --- a/typedapi/types/enums/useragentproperty/useragentproperty.go +++ b/typedapi/types/enums/useragentproperty/useragentproperty.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package useragentproperty package useragentproperty import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L507-L513 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L548-L554 type UserAgentProperty struct { Name string } diff --git a/typedapi/types/enums/valuetype/valuetype.go b/typedapi/types/enums/valuetype/valuetype.go index b01fc28ffa..283de32750 100644 --- a/typedapi/types/enums/valuetype/valuetype.go +++ b/typedapi/types/enums/valuetype/valuetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package valuetype package valuetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L436-L447 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L436-L447 type ValueType struct { Name string } diff --git a/typedapi/types/enums/versiontype/versiontype.go b/typedapi/types/enums/versiontype/versiontype.go index a53c54d921..1f83730857 100644 --- a/typedapi/types/enums/versiontype/versiontype.go +++ b/typedapi/types/enums/versiontype/versiontype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package versiontype package versiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L107-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L100-L119 type VersionType struct { Name string } diff --git a/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go b/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go index aa3119891b..c79cc25506 100644 --- a/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go +++ b/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package waitforactiveshardoptions package waitforactiveshardoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L306-L310 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L302-L306 type WaitForActiveShardOptions struct { Name string } diff --git a/typedapi/types/enums/waitforevents/waitforevents.go b/typedapi/types/enums/waitforevents/waitforevents.go index 51275a9033..1c82cf2e6c 100644 --- a/typedapi/types/enums/waitforevents/waitforevents.go +++ b/typedapi/types/enums/waitforevents/waitforevents.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package waitforevents package waitforevents import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L312-L319 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L308-L315 type WaitForEvents struct { Name string } diff --git a/typedapi/types/enums/watchermetric/watchermetric.go b/typedapi/types/enums/watchermetric/watchermetric.go index 58850d313c..48ac6e08fa 100644 --- a/typedapi/types/enums/watchermetric/watchermetric.go +++ b/typedapi/types/enums/watchermetric/watchermetric.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package watchermetric package watchermetric import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/types.ts#L42-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/types.ts#L63-L69 type WatcherMetric struct { Name string } diff --git a/typedapi/types/enums/watcherstate/watcherstate.go b/typedapi/types/enums/watcherstate/watcherstate.go index 1e4b4867ae..fafeb54fa4 100644 --- a/typedapi/types/enums/watcherstate/watcherstate.go +++ b/typedapi/types/enums/watcherstate/watcherstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package watcherstate package watcherstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/types.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/types.ts#L26-L31 type WatcherState struct { Name string } diff --git a/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go b/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go new file mode 100644 index 0000000000..3c99c58802 --- /dev/null +++ b/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +// Package watsonxtasktype +package watsonxtasktype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_watsonx/PutWatsonxRequest.ts#L72-L74 +type WatsonxTaskType struct { + Name string +} + +var ( + Textembedding = WatsonxTaskType{"text_embedding"} +) + +func (w WatsonxTaskType) MarshalText() (text []byte, err error) { + return []byte(w.String()), nil +} + +func (w *WatsonxTaskType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *w = Textembedding + default: + *w = WatsonxTaskType{string(text)} + } + + return nil +} + +func (w WatsonxTaskType) String() string { + return w.Name +} diff --git a/typedapi/types/enums/xpackcategory/xpackcategory.go b/typedapi/types/enums/xpackcategory/xpackcategory.go index ac80aeab08..7dcbba282f 100644 --- a/typedapi/types/enums/xpackcategory/xpackcategory.go +++ b/typedapi/types/enums/xpackcategory/xpackcategory.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package xpackcategory package xpackcategory import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/XPackInfoRequest.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/XPackInfoRequest.ts#L58-L62 type XPackCategory struct { Name string } diff --git a/typedapi/types/enums/zerotermsquery/zerotermsquery.go b/typedapi/types/enums/zerotermsquery/zerotermsquery.go index 18582d36d0..769d751ce2 100644 --- a/typedapi/types/enums/zerotermsquery/zerotermsquery.go +++ b/typedapi/types/enums/zerotermsquery/zerotermsquery.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b // Package zerotermsquery package zerotermsquery import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L587-L596 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L587-L596 type ZeroTermsQuery struct { Name string } diff --git a/typedapi/types/epochtimeunitmillis.go b/typedapi/types/epochtimeunitmillis.go index 9228490789..d7d0469844 100644 --- a/typedapi/types/epochtimeunitmillis.go +++ b/typedapi/types/epochtimeunitmillis.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EpochTimeUnitMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L40-L40 type EpochTimeUnitMillis int64 + +type EpochTimeUnitMillisVariant interface { + EpochTimeUnitMillisCaster() *EpochTimeUnitMillis +} diff --git a/typedapi/types/epochtimeunitseconds.go b/typedapi/types/epochtimeunitseconds.go index 6098dee21f..25a4d16c0a 100644 --- a/typedapi/types/epochtimeunitseconds.go +++ b/typedapi/types/epochtimeunitseconds.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EpochTimeUnitSeconds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Time.ts#L40-L40 type EpochTimeUnitSeconds int64 + +type EpochTimeUnitSecondsVariant interface { + EpochTimeUnitSecondsCaster() *EpochTimeUnitSeconds +} diff --git a/typedapi/types/eql.go b/typedapi/types/eql.go index c83b9d4f1c..35e0966180 100644 --- a/typedapi/types/eql.go +++ b/typedapi/types/eql.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Eql type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L349-L352 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L361-L364 type Eql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -103,8 +103,10 @@ func (s *Eql) UnmarshalJSON(data []byte) error { // NewEql returns a Eql. func NewEql() *Eql { r := &Eql{ - Queries: make(map[string]XpackQuery, 0), + Queries: make(map[string]XpackQuery), } return r } + +// false diff --git a/typedapi/types/eqlfeatures.go b/typedapi/types/eqlfeatures.go index 456665005b..17206b7a09 100644 --- a/typedapi/types/eqlfeatures.go +++ b/typedapi/types/eqlfeatures.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlFeatures type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L97-L105 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L96-L104 type EqlFeatures struct { Event uint `json:"event"` Join uint `json:"join"` @@ -39,3 +39,5 @@ func NewEqlFeatures() *EqlFeatures { return r } + +// false diff --git a/typedapi/types/eqlfeaturesjoin.go b/typedapi/types/eqlfeaturesjoin.go index 4129efb528..4a461357f6 100644 --- a/typedapi/types/eqlfeaturesjoin.go +++ b/typedapi/types/eqlfeaturesjoin.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlFeaturesJoin type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L107-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L106-L112 type EqlFeaturesJoin struct { JoinQueriesFiveOrMore uint `json:"join_queries_five_or_more"` JoinQueriesFour uint `json:"join_queries_four"` @@ -37,3 +37,5 @@ func NewEqlFeaturesJoin() *EqlFeaturesJoin { return r } + +// false diff --git a/typedapi/types/eqlfeatureskeys.go b/typedapi/types/eqlfeatureskeys.go index 818a136557..4235c0ca0a 100644 --- a/typedapi/types/eqlfeatureskeys.go +++ b/typedapi/types/eqlfeatureskeys.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlFeaturesKeys type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L115-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L114-L120 type EqlFeaturesKeys struct { JoinKeysFiveOrMore uint `json:"join_keys_five_or_more"` JoinKeysFour uint `json:"join_keys_four"` @@ -37,3 +37,5 @@ func NewEqlFeaturesKeys() *EqlFeaturesKeys { return r } + +// false diff --git a/typedapi/types/eqlfeaturespipes.go b/typedapi/types/eqlfeaturespipes.go index 3a05153a73..1ad37701e2 100644 --- a/typedapi/types/eqlfeaturespipes.go +++ b/typedapi/types/eqlfeaturespipes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlFeaturesPipes type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L123-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L122-L125 type EqlFeaturesPipes struct { PipeHead uint `json:"pipe_head"` PipeTail uint `json:"pipe_tail"` @@ -34,3 +34,5 @@ func NewEqlFeaturesPipes() *EqlFeaturesPipes { return r } + +// false diff --git a/typedapi/types/eqlfeaturessequences.go b/typedapi/types/eqlfeaturessequences.go index bf992348cd..6e4341798c 100644 --- a/typedapi/types/eqlfeaturessequences.go +++ b/typedapi/types/eqlfeaturessequences.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlFeaturesSequences type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L128-L135 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L127-L134 type EqlFeaturesSequences struct { SequenceMaxspan uint `json:"sequence_maxspan"` SequenceQueriesFiveOrMore uint `json:"sequence_queries_five_or_more"` @@ -38,3 +38,5 @@ func NewEqlFeaturesSequences() *EqlFeaturesSequences { return r } + +// false diff --git a/typedapi/types/eqlhits.go b/typedapi/types/eqlhits.go index b45cc00855..04d7bffc24 100644 --- a/typedapi/types/eqlhits.go +++ b/typedapi/types/eqlhits.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // EqlHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/_types/EqlHits.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/_types/EqlHits.ts#L25-L39 type EqlHits struct { // Events Contains events matching the query. Each object represents a matching event. Events []HitsEvent `json:"events,omitempty"` @@ -40,3 +40,5 @@ func NewEqlHits() *EqlHits { return r } + +// false diff --git a/typedapi/types/errorcause.go b/typedapi/types/errorcause.go index bf7215eed2..03199da65e 100644 --- a/typedapi/types/errorcause.go +++ b/typedapi/types/errorcause.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,11 @@ import ( // ErrorCause type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Errors.ts#L25-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Errors.ts#L25-L50 type ErrorCause struct { CausedBy *ErrorCause `json:"caused_by,omitempty"` Metadata map[string]json.RawMessage `json:"-"` - // Reason A human-readable explanation of the error, in english + // Reason A human-readable explanation of the error, in English. 
Reason *string `json:"reason,omitempty"` RootCause []ErrorCause `json:"root_cause,omitempty"` // StackTrace The server stack trace. Present only if the `error_trace=true` parameter was @@ -168,8 +168,10 @@ func (s ErrorCause) MarshalJSON() ([]byte, error) { // NewErrorCause returns a ErrorCause. func NewErrorCause() *ErrorCause { r := &ErrorCause{ - Metadata: make(map[string]json.RawMessage, 0), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/errorresponsebase.go b/typedapi/types/errorresponsebase.go index 07b8e857fa..2f1e9e1dec 100644 --- a/typedapi/types/errorresponsebase.go +++ b/typedapi/types/errorresponsebase.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ErrorResponseBase type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Base.ts#L76-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Base.ts#L127-L136 type ErrorResponseBase struct { Error ErrorCause `json:"error"` Status int `json:"status"` @@ -84,3 +84,5 @@ func NewErrorResponseBase() *ErrorResponseBase { return r } + +// false diff --git a/typedapi/types/esqlcolumns.go b/typedapi/types/esqlresult.go similarity index 75% rename from typedapi/types/esqlcolumns.go rename to typedapi/types/esqlresult.go index f888a96d73..df7099fa2a 100644 --- a/typedapi/types/esqlcolumns.go +++ b/typedapi/types/esqlresult.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types -// EsqlColumns type alias. +// EsqlResult type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Binary.ts#L24-L24 -type EsqlColumns []byte +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Binary.ts#L24-L24 +type EsqlResult []byte diff --git a/typedapi/types/estoniananalyzer.go b/typedapi/types/estoniananalyzer.go index 553ac2f93c..80b3c8337a 100644 --- a/typedapi/types/estoniananalyzer.go +++ b/typedapi/types/estoniananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EstonianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L148-L152 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L159-L163 type EstonianAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewEstonianAnalyzer() *EstonianAnalyzer { return r } + +// true + +type EstonianAnalyzerVariant interface { + EstonianAnalyzerCaster() *EstonianAnalyzer +} + +func (s *EstonianAnalyzer) EstonianAnalyzerCaster() *EstonianAnalyzer { + return s +} diff --git a/typedapi/types/eventdatastream.go b/typedapi/types/eventdatastream.go index 91c818e1d9..e2f2fc1f38 100644 --- a/typedapi/types/eventdatastream.go +++ b/typedapi/types/eventdatastream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // EventDataStream type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 type EventDataStream struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewEventDataStream() *EventDataStream { return r } + +// false diff --git a/typedapi/types/ewmamodelsettings.go b/typedapi/types/ewmamodelsettings.go index 1854abeeb8..5ecbc88b47 100644 --- a/typedapi/types/ewmamodelsettings.go +++ b/typedapi/types/ewmamodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // EwmaModelSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L293-L295 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L293-L295 type EwmaModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` } @@ -78,3 +78,13 @@ func NewEwmaModelSettings() *EwmaModelSettings { return r } + +// true + +type EwmaModelSettingsVariant interface { + EwmaModelSettingsCaster() *EwmaModelSettings +} + +func (s *EwmaModelSettings) EwmaModelSettingsCaster() *EwmaModelSettings { + return s +} diff --git a/typedapi/types/ewmamovingaverageaggregation.go b/typedapi/types/ewmamovingaverageaggregation.go index 3e6d018aa2..4181fa6dbd 100644 --- a/typedapi/types/ewmamovingaverageaggregation.go +++ b/typedapi/types/ewmamovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // EwmaMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L278-L281 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L278-L281 type EwmaMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewEwmaMovingAverageAggregation() *EwmaMovingAverageAggregation { return r } + +// true + +type EwmaMovingAverageAggregationVariant interface { + EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation +} + +func (s *EwmaMovingAverageAggregation) EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation { + return s +} diff --git a/typedapi/types/executeenrichpolicystatus.go b/typedapi/types/executeenrichpolicystatus.go index 4c033fad28..ad1a72dd7f 100644 --- a/typedapi/types/executeenrichpolicystatus.go +++ b/typedapi/types/executeenrichpolicystatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // ExecuteEnrichPolicyStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/execute_policy/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/execute_policy/types.ts#L20-L22 type ExecuteEnrichPolicyStatus struct { Phase enrichpolicyphase.EnrichPolicyPhase `json:"phase"` } @@ -37,3 +37,5 @@ func NewExecuteEnrichPolicyStatus() *ExecuteEnrichPolicyStatus { return r } + +// false diff --git a/typedapi/types/executingpolicy.go b/typedapi/types/executingpolicy.go index 6af005f97b..5ca0e3daab 100644 --- a/typedapi/types/executingpolicy.go +++ b/typedapi/types/executingpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ExecutingPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/stats/types.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/stats/types.ts#L25-L28 type ExecutingPolicy struct { Name string `json:"name"` Task TaskInfo `json:"task"` @@ -72,3 +72,5 @@ func NewExecutingPolicy() *ExecutingPolicy { return r } + +// false diff --git a/typedapi/types/executionresult.go b/typedapi/types/executionresult.go index e38e757a72..d87659d1cc 100644 --- a/typedapi/types/executionresult.go +++ b/typedapi/types/executionresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ExecutionResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L60-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L60-L66 type ExecutionResult struct { Actions []ExecutionResultAction `json:"actions"` Condition ExecutionResultCondition `json:"condition"` @@ -90,3 +90,5 @@ func NewExecutionResult() *ExecutionResult { return r } + +// false diff --git a/typedapi/types/executionresultaction.go b/typedapi/types/executionresultaction.go index 73f6ffee37..19b53b5592 100644 --- a/typedapi/types/executionresultaction.go +++ b/typedapi/types/executionresultaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // ExecutionResultAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L74-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L74-L86 type ExecutionResultAction struct { Email *EmailResult `json:"email,omitempty"` Error *ErrorCause `json:"error,omitempty"` @@ -137,3 +137,5 @@ func NewExecutionResultAction() *ExecutionResultAction { return r } + +// false diff --git a/typedapi/types/executionresultcondition.go b/typedapi/types/executionresultcondition.go index e5fd789455..a666f7f9db 100644 --- a/typedapi/types/executionresultcondition.go +++ b/typedapi/types/executionresultcondition.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // ExecutionResultCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L68-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L68-L72 type ExecutionResultCondition struct { Met bool `json:"met"` Status actionstatusoptions.ActionStatusOptions `json:"status"` @@ -91,3 +91,5 @@ func NewExecutionResultCondition() *ExecutionResultCondition { return r } + +// false diff --git a/typedapi/types/executionresultinput.go b/typedapi/types/executionresultinput.go index 375b5a7d2f..e070d1af8f 100644 --- a/typedapi/types/executionresultinput.go +++ b/typedapi/types/executionresultinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // ExecutionResultInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L88-L92 type ExecutionResultInput struct { Payload map[string]json.RawMessage `json:"payload"` Status actionstatusoptions.ActionStatusOptions `json:"status"` @@ -39,8 +39,10 @@ type ExecutionResultInput struct { // NewExecutionResultInput returns a ExecutionResultInput. func NewExecutionResultInput() *ExecutionResultInput { r := &ExecutionResultInput{ - Payload: make(map[string]json.RawMessage, 0), + Payload: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/executionstate.go b/typedapi/types/executionstate.go index 64b09918af..3085a85fa2 100644 --- a/typedapi/types/executionstate.go +++ b/typedapi/types/executionstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExecutionState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L114-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L114-L118 type ExecutionState struct { Reason *string `json:"reason,omitempty"` Successful bool `json:"successful"` @@ -95,3 +95,13 @@ func NewExecutionState() *ExecutionState { return r } + +// true + +type ExecutionStateVariant interface { + ExecutionStateCaster() *ExecutionState +} + +func (s *ExecutionState) ExecutionStateCaster() *ExecutionState { + return s +} diff --git a/typedapi/types/executionthreadpool.go b/typedapi/types/executionthreadpool.go index 9e2cc2a25c..da32d1c12f 100644 --- a/typedapi/types/executionthreadpool.go +++ b/typedapi/types/executionthreadpool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,9 +31,12 @@ import ( // ExecutionThreadPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Execution.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Execution.ts#L94-L103 type ExecutionThreadPool struct { - MaxSize int64 `json:"max_size"` + // MaxSize The largest size of the execution thread pool, which indicates the largest + // number of concurrent running watches. + MaxSize int64 `json:"max_size"` + // QueueSize The number of watches that were triggered and are currently queued. 
QueueSize int64 `json:"queue_size"` } @@ -93,3 +96,5 @@ func NewExecutionThreadPool() *ExecutionThreadPool { return r } + +// false diff --git a/typedapi/types/existsquery.go b/typedapi/types/existsquery.go index 2b85698bf7..9a9d2930ce 100644 --- a/typedapi/types/existsquery.go +++ b/typedapi/types/existsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExistsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L38-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L38-L46 type ExistsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -103,3 +103,13 @@ func NewExistsQuery() *ExistsQuery { return r } + +// true + +type ExistsQueryVariant interface { + ExistsQueryCaster() *ExistsQuery +} + +func (s *ExistsQuery) ExistsQueryCaster() *ExistsQuery { + return s +} diff --git a/typedapi/types/expandwildcards.go b/typedapi/types/expandwildcards.go index e1dcd7be94..41616a4374 100644 --- a/typedapi/types/expandwildcards.go +++ b/typedapi/types/expandwildcards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ import ( // ExpandWildcards type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L218-L218 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L214-L214 type ExpandWildcards []expandwildcard.ExpandWildcard + +type ExpandWildcardsVariant interface { + ExpandWildcardsCaster() *ExpandWildcards +} diff --git a/typedapi/types/explainanalyzetoken.go b/typedapi/types/explainanalyzetoken.go index e999a8eb4f..d2d1acdd0c 100644 --- a/typedapi/types/explainanalyzetoken.go +++ b/typedapi/types/explainanalyzetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExplainAnalyzeToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L52-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L52-L67 type ExplainAnalyzeToken struct { Bytes string `json:"bytes"` EndOffset int64 `json:"end_offset"` @@ -235,8 +235,10 @@ func (s ExplainAnalyzeToken) MarshalJSON() ([]byte, error) { // NewExplainAnalyzeToken returns a ExplainAnalyzeToken. 
func NewExplainAnalyzeToken() *ExplainAnalyzeToken { r := &ExplainAnalyzeToken{ - ExplainAnalyzeToken: make(map[string]json.RawMessage, 0), + ExplainAnalyzeToken: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/explanation.go b/typedapi/types/explanation.go index f0cc955bf3..2b2641f696 100644 --- a/typedapi/types/explanation.go +++ b/typedapi/types/explanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Explanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/explain/types.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/explain/types.ts#L22-L26 type Explanation struct { Description string `json:"description"` Details []ExplanationDetail `json:"details"` @@ -97,3 +97,5 @@ func NewExplanation() *Explanation { return r } + +// false diff --git a/typedapi/types/explanationdetail.go b/typedapi/types/explanationdetail.go index 71359329db..6b9d93ea74 100644 --- a/typedapi/types/explanationdetail.go +++ b/typedapi/types/explanationdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExplanationDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/explain/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/explain/types.ts#L28-L32 type ExplanationDetail struct { Description string `json:"description"` Details []ExplanationDetail `json:"details,omitempty"` @@ -97,3 +97,5 @@ func NewExplanationDetail() *ExplanationDetail { return r } + +// false diff --git a/typedapi/types/explorecontrols.go b/typedapi/types/explorecontrols.go index 1b1b3c5bd3..b9daa18615 100644 --- a/typedapi/types/explorecontrols.go +++ b/typedapi/types/explorecontrols.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExploreControls type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/ExploreControls.ts#L24-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/ExploreControls.ts#L24-L49 type ExploreControls struct { // SampleDiversity To avoid the top-matching documents sample being dominated by a single source // of results, it is sometimes necessary to request diversity in the sample. 
@@ -123,3 +123,13 @@ func NewExploreControls() *ExploreControls { return r } + +// true + +type ExploreControlsVariant interface { + ExploreControlsCaster() *ExploreControls +} + +func (s *ExploreControls) ExploreControlsCaster() *ExploreControls { + return s +} diff --git a/typedapi/types/exponentialaveragecalculationcontext.go b/typedapi/types/exponentialaveragecalculationcontext.go new file mode 100644 index 0000000000..f5b7e7598d --- /dev/null +++ b/typedapi/types/exponentialaveragecalculationcontext.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ExponentialAverageCalculationContext type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L204-L208 +type ExponentialAverageCalculationContext struct { + IncrementalMetricValueMs Float64 `json:"incremental_metric_value_ms"` + LatestTimestamp *int64 `json:"latest_timestamp,omitempty"` + PreviousExponentialAverageMs Float64 `json:"previous_exponential_average_ms,omitempty"` +} + +func (s *ExponentialAverageCalculationContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental_metric_value_ms": + if err := dec.Decode(&s.IncrementalMetricValueMs); err != nil { + return fmt.Errorf("%s | %w", "IncrementalMetricValueMs", err) + } + + case "latest_timestamp": + if err := dec.Decode(&s.LatestTimestamp); err != nil { + return fmt.Errorf("%s | %w", "LatestTimestamp", err) + } + + case "previous_exponential_average_ms": + if err := dec.Decode(&s.PreviousExponentialAverageMs); err != nil { + return fmt.Errorf("%s | %w", "PreviousExponentialAverageMs", err) + } + + } + } + return nil +} + +// NewExponentialAverageCalculationContext returns a ExponentialAverageCalculationContext. +func NewExponentialAverageCalculationContext() *ExponentialAverageCalculationContext { + r := &ExponentialAverageCalculationContext{} + + return r +} + +// false diff --git a/typedapi/types/extendedboundsdouble.go b/typedapi/types/extendedboundsdouble.go index e64b4b10cb..51f9740522 100644 --- a/typedapi/types/extendedboundsdouble.go +++ b/typedapi/types/extendedboundsdouble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExtendedBoundsdouble type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L508-L517 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L508-L517 type ExtendedBoundsdouble struct { // Max Maximum value for the bound. Max *Float64 `json:"max,omitempty"` @@ -97,3 +97,13 @@ func NewExtendedBoundsdouble() *ExtendedBoundsdouble { return r } + +// true + +type ExtendedBoundsdoubleVariant interface { + ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble +} + +func (s *ExtendedBoundsdouble) ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble { + return s +} diff --git a/typedapi/types/extendedboundsfielddatemath.go b/typedapi/types/extendedboundsfielddatemath.go index 74a518d246..d7db3b3a89 100644 --- a/typedapi/types/extendedboundsfielddatemath.go +++ b/typedapi/types/extendedboundsfielddatemath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ExtendedBoundsFieldDateMath type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L508-L517 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L508-L517 type ExtendedBoundsFieldDateMath struct { // Max Maximum value for the bound. Max FieldDateMath `json:"max,omitempty"` @@ -74,3 +74,13 @@ func NewExtendedBoundsFieldDateMath() *ExtendedBoundsFieldDateMath { return r } + +// true + +type ExtendedBoundsFieldDateMathVariant interface { + ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath +} + +func (s *ExtendedBoundsFieldDateMath) ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath { + return s +} diff --git a/typedapi/types/extendedmemorystats.go b/typedapi/types/extendedmemorystats.go index 0e85cf4f96..30a58e13b0 100644 --- a/typedapi/types/extendedmemorystats.go +++ b/typedapi/types/extendedmemorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExtendedMemoryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L658-L667 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L658-L667 type ExtendedMemoryStats struct { // AdjustedTotalInBytes If the amount of physical memory has been overridden using the // `es`.`total_memory_bytes` system property then this reports the overridden @@ -255,3 +255,5 @@ func NewExtendedMemoryStats() *ExtendedMemoryStats { return r } + +// false diff --git a/typedapi/types/extendedstatsaggregate.go b/typedapi/types/extendedstatsaggregate.go index 8f3dc083bd..de27cb035c 100644 --- a/typedapi/types/extendedstatsaggregate.go +++ b/typedapi/types/extendedstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L299-L320 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L299-L320 type ExtendedStatsAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -289,3 +289,5 @@ func NewExtendedStatsAggregate() *ExtendedStatsAggregate { return r } + +// false diff --git a/typedapi/types/extendedstatsaggregation.go b/typedapi/types/extendedstatsaggregation.go index bfffad9cf4..40b9c8b22e 100644 --- a/typedapi/types/extendedstatsaggregation.go +++ b/typedapi/types/extendedstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L101-L106 type ExtendedStatsAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -113,3 +113,13 @@ func NewExtendedStatsAggregation() *ExtendedStatsAggregation { return r } + +// true + +type ExtendedStatsAggregationVariant interface { + ExtendedStatsAggregationCaster() *ExtendedStatsAggregation +} + +func (s *ExtendedStatsAggregation) ExtendedStatsAggregationCaster() *ExtendedStatsAggregation { + return s +} diff --git a/typedapi/types/extendedstatsbucketaggregate.go b/typedapi/types/extendedstatsbucketaggregate.go index 187dd85fd6..8d81b2b352 100644 --- a/typedapi/types/extendedstatsbucketaggregate.go +++ b/typedapi/types/extendedstatsbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsBucketAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L322-L323 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L322-L323 type ExtendedStatsBucketAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -289,3 +289,5 @@ func NewExtendedStatsBucketAggregate() *ExtendedStatsBucketAggregate { return r } + +// false diff --git a/typedapi/types/extendedstatsbucketaggregation.go b/typedapi/types/extendedstatsbucketaggregation.go index 1ca1fe23b7..b2c00ca75e 100644 --- a/typedapi/types/extendedstatsbucketaggregation.go +++ b/typedapi/types/extendedstatsbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ExtendedStatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L218-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L218-L223 type ExtendedStatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -111,3 +111,13 @@ func NewExtendedStatsBucketAggregation() *ExtendedStatsBucketAggregation { return r } + +// true + +type ExtendedStatsBucketAggregationVariant interface { + ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation +} + +func (s *ExtendedStatsBucketAggregation) ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation { + return s +} diff --git a/typedapi/types/failprocessor.go b/typedapi/types/failprocessor.go index f0e2677fba..a5bee46bca 100644 --- a/typedapi/types/failprocessor.go +++ b/typedapi/types/failprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,13 @@ import ( // FailProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L848-L854 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L889-L895 type FailProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // Message The error message thrown by the processor. @@ -78,16 +78,9 @@ func (s *FailProcessor) UnmarshalJSON(data []byte) error { s.Description = &o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -143,3 +136,13 @@ func NewFailProcessor() *FailProcessor { return r } + +// true + +type FailProcessorVariant interface { + FailProcessorCaster() *FailProcessor +} + +func (s *FailProcessor) FailProcessorCaster() *FailProcessor { + return s +} diff --git a/typedapi/types/failurestore.go b/typedapi/types/failurestore.go index 2aee48eaa6..18833a4896 100644 --- a/typedapi/types/failurestore.go +++ b/typedapi/types/failurestore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FailureStore type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/DataStream.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/DataStream.ts#L39-L43 type FailureStore struct { Enabled bool `json:"enabled"` Indices []DataStreamIndex `json:"indices"` @@ -97,3 +97,5 @@ func NewFailureStore() *FailureStore { return r } + +// false diff --git a/typedapi/types/feature.go b/typedapi/types/feature.go index 5bb9226f43..1f6d05fb83 100644 --- a/typedapi/types/feature.go +++ b/typedapi/types/feature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Feature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/features/_types/Feature.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/features/_types/Feature.ts#L20-L23 type Feature struct { Description string `json:"description"` Name string `json:"name"` @@ -87,3 +87,5 @@ func NewFeature() *Feature { return r } + +// false diff --git a/typedapi/types/featureenabled.go b/typedapi/types/featureenabled.go index 6f19f2c8d8..13ccc01fa0 100644 --- a/typedapi/types/featureenabled.go +++ b/typedapi/types/featureenabled.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FeatureEnabled type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L215-L217 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L215-L217 type FeatureEnabled struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewFeatureEnabled() *FeatureEnabled { return r } + +// true + +type FeatureEnabledVariant interface { + FeatureEnabledCaster() *FeatureEnabled +} + +func (s *FeatureEnabled) FeatureEnabledCaster() *FeatureEnabled { + return s +} diff --git a/typedapi/types/features.go b/typedapi/types/features.go index 395b289f72..6c15583543 100644 --- a/typedapi/types/features.go +++ b/typedapi/types/features.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ import ( // Features type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get/IndicesGetRequest.ts#L96-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get/IndicesGetRequest.ts#L103-L103 type Features []feature.Feature diff --git a/typedapi/types/featuretoggle.go b/typedapi/types/featuretoggle.go index e157429206..0c05c2c927 100644 --- a/typedapi/types/featuretoggle.go +++ b/typedapi/types/featuretoggle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FeatureToggle type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L38-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L37-L39 type FeatureToggle struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,5 @@ func NewFeatureToggle() *FeatureToggle { return r } + +// false diff --git a/typedapi/types/fetchprofile.go b/typedapi/types/fetchprofile.go index 0dc7ea5b8a..b8bdd5f4c8 100644 --- a/typedapi/types/fetchprofile.go +++ b/typedapi/types/fetchprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FetchProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L230-L237 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L230-L237 type FetchProfile struct { Breakdown FetchProfileBreakdown `json:"breakdown"` Children []FetchProfile `json:"children,omitempty"` @@ -111,3 +111,5 @@ func NewFetchProfile() *FetchProfile { return r } + +// false diff --git a/typedapi/types/fetchprofilebreakdown.go b/typedapi/types/fetchprofilebreakdown.go index 8b4fbc0f27..9d54f26958 100644 --- a/typedapi/types/fetchprofilebreakdown.go +++ b/typedapi/types/fetchprofilebreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FetchProfileBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L239-L248 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L239-L248 type FetchProfileBreakdown struct { LoadSource *int `json:"load_source,omitempty"` LoadSourceCount *int `json:"load_source_count,omitempty"` @@ -197,3 +197,5 @@ func NewFetchProfileBreakdown() *FetchProfileBreakdown { return r } + +// false diff --git a/typedapi/types/fetchprofiledebug.go b/typedapi/types/fetchprofiledebug.go index 5989572043..0c1f2a19ad 100644 --- a/typedapi/types/fetchprofiledebug.go +++ b/typedapi/types/fetchprofiledebug.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FetchProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L250-L253 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L250-L253 type FetchProfileDebug struct { FastPath *int `json:"fast_path,omitempty"` StoredFields []string `json:"stored_fields,omitempty"` @@ -84,3 +84,5 @@ func NewFetchProfileDebug() *FetchProfileDebug { return r } + +// false diff --git a/typedapi/types/fieldaliasproperty.go b/typedapi/types/fieldaliasproperty.go index 011e6c0d0e..e44f445462 100644 --- a/typedapi/types/fieldaliasproperty.go +++ b/typedapi/types/fieldaliasproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FieldAliasProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L55-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L64-L67 type FieldAliasProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Path *string `json:"path,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Path *string `json:"path,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -432,306 +446,323 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -746,13 +777,14 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { type innerFieldAliasProperty FieldAliasProperty tmp := innerFieldAliasProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Path: s.Path, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Path: s.Path, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "alias" @@ -763,10 +795,20 @@ func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { // NewFieldAliasProperty returns a FieldAliasProperty. 
func NewFieldAliasProperty() *FieldAliasProperty { r := &FieldAliasProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FieldAliasPropertyVariant interface { + FieldAliasPropertyCaster() *FieldAliasProperty +} + +func (s *FieldAliasProperty) FieldAliasPropertyCaster() *FieldAliasProperty { + return s +} diff --git a/typedapi/types/fieldandformat.go b/typedapi/types/fieldandformat.go index a4a1c39131..e7e55abcfa 100644 --- a/typedapi/types/fieldandformat.go +++ b/typedapi/types/fieldandformat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,12 @@ import ( // FieldAndFormat type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L528-L542 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L535-L549 type FieldAndFormat struct { - // Field Wildcard pattern. The request returns values for field names matching this + // Field A wildcard pattern. The request returns values for field names matching this // pattern. Field string `json:"field"` - // Format Format in which the values are returned. + // Format The format in which the values are returned. 
Format *string `json:"format,omitempty"` IncludeUnmapped *bool `json:"include_unmapped,omitempty"` } @@ -110,3 +110,13 @@ func NewFieldAndFormat() *FieldAndFormat { return r } + +// true + +type FieldAndFormatVariant interface { + FieldAndFormatCaster() *FieldAndFormat +} + +func (s *FieldAndFormat) FieldAndFormatCaster() *FieldAndFormat { + return s +} diff --git a/typedapi/types/fieldcapability.go b/typedapi/types/fieldcapability.go index 90c7143307..39d091ffe9 100644 --- a/typedapi/types/fieldcapability.go +++ b/typedapi/types/fieldcapability.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // FieldCapability type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/field_caps/types.ts#L23-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/field_caps/types.ts#L23-L81 type FieldCapability struct { // Aggregatable Whether this field can be aggregated on all indices. Aggregatable bool `json:"aggregatable"` @@ -231,3 +231,5 @@ func NewFieldCapability() *FieldCapability { return r } + +// false diff --git a/typedapi/types/fieldcollapse.go b/typedapi/types/fieldcollapse.go index 403e87f382..ce4dce4761 100644 --- a/typedapi/types/fieldcollapse.go +++ b/typedapi/types/fieldcollapse.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldCollapse type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/FieldCollapse.ts#L24-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/FieldCollapse.ts#L24-L41 type FieldCollapse struct { Collapse *FieldCollapse `json:"collapse,omitempty"` // Field The field to collapse the result set on @@ -111,3 +111,13 @@ func NewFieldCollapse() *FieldCollapse { return r } + +// true + +type FieldCollapseVariant interface { + FieldCollapseCaster() *FieldCollapse +} + +func (s *FieldCollapse) FieldCollapseCaster() *FieldCollapse { + return s +} diff --git a/typedapi/types/fielddatafrequencyfilter.go b/typedapi/types/fielddatafrequencyfilter.go index 5afda7d96e..d3dc61fda8 100644 --- a/typedapi/types/fielddatafrequencyfilter.go +++ b/typedapi/types/fielddatafrequencyfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FielddataFrequencyFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 type FielddataFrequencyFilter struct { Max Float64 `json:"max"` Min Float64 `json:"min"` @@ -112,3 +112,13 @@ func NewFielddataFrequencyFilter() *FielddataFrequencyFilter { return r } + +// true + +type FielddataFrequencyFilterVariant interface { + FielddataFrequencyFilterCaster() *FielddataFrequencyFilter +} + +func (s *FielddataFrequencyFilter) FielddataFrequencyFilterCaster() *FielddataFrequencyFilter { + return s +} diff --git a/typedapi/types/fielddatarecord.go b/typedapi/types/fielddatarecord.go index 63a8b50713..08319e4412 100644 --- a/typedapi/types/fielddatarecord.go +++ b/typedapi/types/fielddatarecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FielddataRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/fielddata/types.ts#L20-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/fielddata/types.ts#L20-L48 type FielddataRecord struct { // Field field name Field *string `json:"field,omitempty"` @@ -145,3 +145,5 @@ func NewFielddataRecord() *FielddataRecord { return r } + +// false diff --git a/typedapi/types/fielddatastats.go b/typedapi/types/fielddatastats.go index 9cac21708c..6262c3d3d5 100644 --- a/typedapi/types/fielddatastats.go +++ b/typedapi/types/fielddatastats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FielddataStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L111-L116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L114-L119 type FielddataStats struct { Evictions *int64 `json:"evictions,omitempty"` Fields map[string]FieldMemoryUsage `json:"fields,omitempty"` @@ -105,8 +105,10 @@ func (s *FielddataStats) UnmarshalJSON(data []byte) error { // NewFielddataStats returns a FielddataStats. 
func NewFielddataStats() *FielddataStats { r := &FielddataStats{ - Fields: make(map[string]FieldMemoryUsage, 0), + Fields: make(map[string]FieldMemoryUsage), } return r } + +// false diff --git a/typedapi/types/fielddatemath.go b/typedapi/types/fielddatemath.go index c3c35bbaf1..5ac3d52e85 100644 --- a/typedapi/types/fielddatemath.go +++ b/typedapi/types/fielddatemath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // Float64 // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L309-L316 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L309-L316 type FieldDateMath any + +type FieldDateMathVariant interface { + FieldDateMathCaster() *FieldDateMath +} diff --git a/typedapi/types/fieldlookup.go b/typedapi/types/fieldlookup.go index 8094f6f62a..d0647c90b4 100644 --- a/typedapi/types/fieldlookup.go +++ b/typedapi/types/fieldlookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FieldLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L429-L446 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L436-L453 type FieldLookup struct { // Id `id` of the document. Id string `json:"id"` @@ -88,3 +88,13 @@ func NewFieldLookup() *FieldLookup { return r } + +// true + +type FieldLookupVariant interface { + FieldLookupCaster() *FieldLookup +} + +func (s *FieldLookup) FieldLookupCaster() *FieldLookup { + return s +} diff --git a/typedapi/types/fieldmapping.go b/typedapi/types/fieldmapping.go index dde1bfc508..c4f769b9a8 100644 --- a/typedapi/types/fieldmapping.go +++ b/typedapi/types/fieldmapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L24-L27 type FieldMapping struct { FullName string `json:"full_name"` Mapping map[string]Property `json:"mapping"` @@ -83,301 +83,313 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } 
s.Mapping[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date_range": oo := 
NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Mapping[key] = oo } @@ -391,8 +403,10 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { // NewFieldMapping returns a FieldMapping. func NewFieldMapping() *FieldMapping { r := &FieldMapping{ - Mapping: make(map[string]Property, 0), + Mapping: make(map[string]Property), } return r } + +// false diff --git a/typedapi/types/fieldmemoryusage.go b/typedapi/types/fieldmemoryusage.go index cf2d4dbd0b..d8b6167d69 100644 --- a/typedapi/types/fieldmemoryusage.go +++ b/typedapi/types/fieldmemoryusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldMemoryUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L118-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L121-L124 type FieldMemoryUsage struct { MemorySize ByteSize `json:"memory_size,omitempty"` MemorySizeInBytes int64 `json:"memory_size_in_bytes"` @@ -83,3 +83,5 @@ func NewFieldMemoryUsage() *FieldMemoryUsage { return r } + +// false diff --git a/typedapi/types/fieldmetric.go b/typedapi/types/fieldmetric.go index 4147d0de70..4ea64da7db 100644 --- a/typedapi/types/fieldmetric.go +++ b/typedapi/types/fieldmetric.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // FieldMetric type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Metric.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Metric.ts#L30-L35 type FieldMetric struct { // Field The field to collect metrics for. This must be a numeric of some kind. 
Field string `json:"field"` @@ -77,3 +77,13 @@ func NewFieldMetric() *FieldMetric { return r } + +// true + +type FieldMetricVariant interface { + FieldMetricCaster() *FieldMetric +} + +func (s *FieldMetric) FieldMetricCaster() *FieldMetric { + return s +} diff --git a/typedapi/types/fieldnamesfield.go b/typedapi/types/fieldnamesfield.go index cbb28d7f4e..f4bee78877 100644 --- a/typedapi/types/fieldnamesfield.go +++ b/typedapi/types/fieldnamesfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldNamesField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L42-L44 type FieldNamesField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewFieldNamesField() *FieldNamesField { return r } + +// true + +type FieldNamesFieldVariant interface { + FieldNamesFieldCaster() *FieldNamesField +} + +func (s *FieldNamesField) FieldNamesFieldCaster() *FieldNamesField { + return s +} diff --git a/typedapi/types/fieldrule.go b/typedapi/types/fieldrule.go deleted file mode 100644 index 26a5da09e9..0000000000 --- a/typedapi/types/fieldrule.go +++ /dev/null @@ -1,113 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// FieldRule type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleMappingRule.ts#L35-L43 -type FieldRule struct { - Dn []string `json:"dn,omitempty"` - Groups []string `json:"groups,omitempty"` - Username []string `json:"username,omitempty"` -} - -func (s *FieldRule) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "dn": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Dn", err) - } - - s.Dn = append(s.Dn, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Dn); err != nil { - return fmt.Errorf("%s | %w", "Dn", err) - } - } - - case "groups": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Groups", err) - } - - s.Groups = append(s.Groups, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Groups); err != nil { - return fmt.Errorf("%s | %w", "Groups", err) - } - } - - case "username": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Username", err) - } - - s.Username = append(s.Username, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Username); err != nil { - return fmt.Errorf("%s | %w", "Username", err) - } - } - - } - } - return nil -} - -// NewFieldRule returns a FieldRule. -func NewFieldRule() *FieldRule { - r := &FieldRule{} - - return r -} diff --git a/typedapi/types/fields.go b/typedapi/types/fields.go index 9ad0e1491f..9e51087b26 100644 --- a/typedapi/types/fields.go +++ b/typedapi/types/fields.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Fields type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L141-L141 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L137-L137 type Fields []string + +type FieldsVariant interface { + FieldsCaster() *Fields +} diff --git a/typedapi/types/fieldsecurity.go b/typedapi/types/fieldsecurity.go index 95525b0cc7..66e3f01ace 100644 --- a/typedapi/types/fieldsecurity.go +++ b/typedapi/types/fieldsecurity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FieldSecurity type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/FieldSecurity.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/FieldSecurity.ts#L22-L25 type FieldSecurity struct { Except []string `json:"except,omitempty"` Grant []string `json:"grant,omitempty"` @@ -94,3 +94,13 @@ func NewFieldSecurity() *FieldSecurity { return r } + +// true + +type FieldSecurityVariant interface { + FieldSecurityCaster() *FieldSecurity +} + +func (s *FieldSecurity) FieldSecurityCaster() *FieldSecurity { + return s +} diff --git a/typedapi/types/fieldsizeusage.go b/typedapi/types/fieldsizeusage.go index f429967be3..875a22ce16 100644 --- a/typedapi/types/fieldsizeusage.go +++ b/typedapi/types/fieldsizeusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldSizeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L92-L95 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L95-L98 type FieldSizeUsage struct { Size ByteSize `json:"size,omitempty"` SizeInBytes int64 `json:"size_in_bytes"` @@ -83,3 +83,5 @@ func NewFieldSizeUsage() *FieldSizeUsage { return r } + +// false diff --git a/typedapi/types/fieldsort.go b/typedapi/types/fieldsort.go index f10aec68ce..3096c65182 100644 --- a/typedapi/types/fieldsort.go +++ b/typedapi/types/fieldsort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,7 +36,7 @@ import ( // FieldSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L43-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L43-L52 type FieldSort struct { Format *string `json:"format,omitempty"` Missing Missing `json:"missing,omitempty"` @@ -120,3 +120,13 @@ func NewFieldSort() *FieldSort { return r } + +// true + +type FieldSortVariant interface { + FieldSortCaster() *FieldSort +} + +func (s *FieldSort) FieldSortCaster() *FieldSort { + return s +} diff --git a/typedapi/types/fieldstat.go b/typedapi/types/fieldstat.go index 8adc8523cf..9c8ab5d55f 100644 --- a/typedapi/types/fieldstat.go +++ b/typedapi/types/fieldstat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldStat type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/find_structure/types.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/_types/Structure.ts#L23-L33 type FieldStat struct { Cardinality int `json:"cardinality"` Count int `json:"count"` @@ -195,3 +195,5 @@ func NewFieldStat() *FieldStat { return r } + +// false diff --git a/typedapi/types/fieldstatistics.go b/typedapi/types/fieldstatistics.go index 36c940a939..932939f913 100644 --- a/typedapi/types/fieldstatistics.go +++ b/typedapi/types/fieldstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/types.ts#L28-L32 type FieldStatistics struct { DocCount int `json:"doc_count"` SumDocFreq int64 `json:"sum_doc_freq"` @@ -110,3 +110,5 @@ func NewFieldStatistics() *FieldStatistics { return r } + +// false diff --git a/typedapi/types/fieldsuggester.go b/typedapi/types/fieldsuggester.go index 8f8453869d..3791e48b56 100644 --- a/typedapi/types/fieldsuggester.go +++ b/typedapi/types/fieldsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,9 @@ import ( // FieldSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L109-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L109-L142 type FieldSuggester struct { + AdditionalFieldSuggesterProperty map[string]json.RawMessage `json:"-"` // Completion Provides auto-complete/search-as-you-type functionality. 
Completion *CompletionSuggester `json:"completion,omitempty"` // Phrase Provides access to word alternatives on a per token basis within a certain @@ -115,14 +116,68 @@ func (s *FieldSuggester) UnmarshalJSON(data []byte) error { } s.Text = &o + default: + + if key, ok := t.(string); ok { + if s.AdditionalFieldSuggesterProperty == nil { + s.AdditionalFieldSuggesterProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFieldSuggesterProperty", err) + } + s.AdditionalFieldSuggesterProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s FieldSuggester) MarshalJSON() ([]byte, error) { + type opt FieldSuggester + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFieldSuggesterProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFieldSuggesterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewFieldSuggester returns a FieldSuggester. 
func NewFieldSuggester() *FieldSuggester { - r := &FieldSuggester{} + r := &FieldSuggester{ + AdditionalFieldSuggesterProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type FieldSuggesterVariant interface { + FieldSuggesterCaster() *FieldSuggester +} + +func (s *FieldSuggester) FieldSuggesterCaster() *FieldSuggester { + return s +} diff --git a/typedapi/types/fieldsummary.go b/typedapi/types/fieldsummary.go index 4122abf913..9d4525dad6 100644 --- a/typedapi/types/fieldsummary.go +++ b/typedapi/types/fieldsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // FieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66 type FieldSummary struct { Any uint `json:"any"` DocValues uint `json:"doc_values"` @@ -40,3 +40,5 @@ func NewFieldSummary() *FieldSummary { return r } + +// false diff --git a/typedapi/types/fieldsusagebody.go b/typedapi/types/fieldsusagebody.go deleted file mode 100644 index 4081f4669c..0000000000 --- a/typedapi/types/fieldsusagebody.go +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "encoding/json" - "fmt" -) - -// FieldsUsageBody type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L39 -type FieldsUsageBody struct { - FieldsUsageBody map[string]UsageStatsIndex `json:"-"` - Shards_ ShardStatistics `json:"_shards"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s FieldsUsageBody) MarshalJSON() ([]byte, error) { - type opt FieldsUsageBody - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.FieldsUsageBody { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "FieldsUsageBody") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewFieldsUsageBody returns a FieldsUsageBody. 
-func NewFieldsUsageBody() *FieldsUsageBody { - r := &FieldsUsageBody{ - FieldsUsageBody: make(map[string]UsageStatsIndex, 0), - } - - return r -} diff --git a/typedapi/types/fieldtypes.go b/typedapi/types/fieldtypes.go index 90b89dadd3..8121e01974 100644 --- a/typedapi/types/fieldtypes.go +++ b/typedapi/types/fieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L136-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L136-L167 type FieldTypes struct { // Count The number of occurrences of the field type in selected nodes. Count int `json:"count"` @@ -176,3 +176,5 @@ func NewFieldTypes() *FieldTypes { return r } + +// false diff --git a/typedapi/types/fieldtypesmappings.go b/typedapi/types/fieldtypesmappings.go index 6e38f68ae8..8471358708 100644 --- a/typedapi/types/fieldtypesmappings.go +++ b/typedapi/types/fieldtypesmappings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FieldTypesMappings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L109-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L109-L134 type FieldTypesMappings struct { // FieldTypes Contains statistics about field data types used in selected nodes. FieldTypes []FieldTypes `json:"field_types"` @@ -136,3 +136,5 @@ func NewFieldTypesMappings() *FieldTypesMappings { return r } + +// false diff --git a/typedapi/types/fieldvalue.go b/typedapi/types/fieldvalue.go index 09f239bf1b..7d61de0b73 100644 --- a/typedapi/types/fieldvalue.go +++ b/typedapi/types/fieldvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,7 +27,10 @@ package types // string // bool // nil -// json.RawMessage // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L25-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L25-L31 type FieldValue any + +type FieldValueVariant interface { + FieldValueCaster() *FieldValue +} diff --git a/typedapi/types/fieldvaluefactorscorefunction.go b/typedapi/types/fieldvaluefactorscorefunction.go index df23ca2189..7f6b7c37dc 100644 --- a/typedapi/types/fieldvaluefactorscorefunction.go +++ b/typedapi/types/fieldvaluefactorscorefunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // FieldValueFactorScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L149-L168 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L149-L168 type FieldValueFactorScoreFunction struct { // Factor Optional factor to multiply the field value with. Factor *Float64 `json:"factor,omitempty"` @@ -115,3 +115,13 @@ func NewFieldValueFactorScoreFunction() *FieldValueFactorScoreFunction { return r } + +// true + +type FieldValueFactorScoreFunctionVariant interface { + FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction +} + +func (s *FieldValueFactorScoreFunction) FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction { + return s +} diff --git a/typedapi/types/filecountsnapshotstats.go b/typedapi/types/filecountsnapshotstats.go index e268c9f46e..887713d70a 100644 --- a/typedapi/types/filecountsnapshotstats.go +++ b/typedapi/types/filecountsnapshotstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FileCountSnapshotStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 type FileCountSnapshotStats struct { FileCount int `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` @@ -94,3 +94,5 @@ func NewFileCountSnapshotStats() *FileCountSnapshotStats { return r } + +// false diff --git a/typedapi/types/filedetails.go b/typedapi/types/filedetails.go index 611c0924a1..14ea1b7603 100644 --- a/typedapi/types/filedetails.go +++ b/typedapi/types/filedetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FileDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L50-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L50-L54 type FileDetails struct { Length int64 `json:"length"` Name string `json:"name"` @@ -106,3 +106,5 @@ func NewFileDetails() *FileDetails { return r } + +// false diff --git a/typedapi/types/baseindicator.go b/typedapi/types/filesettingsindicator.go similarity index 74% rename from typedapi/types/baseindicator.go rename to typedapi/types/filesettingsindicator.go index bb9ac0cf4b..65e56b2d90 100644 --- a/typedapi/types/baseindicator.go +++ b/typedapi/types/filesettingsindicator.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,17 +31,18 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" ) -// BaseIndicator type. +// FileSettingsIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L43-L48 -type BaseIndicator struct { +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L207-L211 +type FileSettingsIndicator struct { + Details *FileSettingsIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` Impacts []Impact `json:"impacts,omitempty"` Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` Symptom string `json:"symptom"` } -func (s *BaseIndicator) UnmarshalJSON(data []byte) error { +func (s *FileSettingsIndicator) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -56,6 +57,11 @@ func (s *BaseIndicator) UnmarshalJSON(data []byte) error { switch t { + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + case "diagnosis": if err := dec.Decode(&s.Diagnosis); err != nil { return fmt.Errorf("%s | %w", "Diagnosis", err) @@ -88,9 +94,11 @@ func (s *BaseIndicator) UnmarshalJSON(data []byte) error { return nil } -// NewBaseIndicator returns a BaseIndicator. -func NewBaseIndicator() *BaseIndicator { - r := &BaseIndicator{} +// NewFileSettingsIndicator returns a FileSettingsIndicator. 
+func NewFileSettingsIndicator() *FileSettingsIndicator { + r := &FileSettingsIndicator{} return r } + +// false diff --git a/typedapi/types/filesettingsindicatordetails.go b/typedapi/types/filesettingsindicatordetails.go new file mode 100644 index 0000000000..f3e15d597f --- /dev/null +++ b/typedapi/types/filesettingsindicatordetails.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileSettingsIndicatorDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L213-L216 +type FileSettingsIndicatorDetails struct { + FailureStreak int64 `json:"failure_streak"` + MostRecentFailure string `json:"most_recent_failure"` +} + +func (s *FileSettingsIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failure_streak": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FailureStreak", err) + } + s.FailureStreak = value + case float64: + f := int64(v) + s.FailureStreak = f + } + + case "most_recent_failure": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MostRecentFailure", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MostRecentFailure = o + + } + } + return nil +} + +// NewFileSettingsIndicatorDetails returns a FileSettingsIndicatorDetails. +func NewFileSettingsIndicatorDetails() *FileSettingsIndicatorDetails { + r := &FileSettingsIndicatorDetails{} + + return r +} + +// false diff --git a/typedapi/types/filesystem.go b/typedapi/types/filesystem.go index f3253267c3..69c53aaa70 100644 --- a/typedapi/types/filesystem.go +++ b/typedapi/types/filesystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FileSystem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L769-L787 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L769-L787 type FileSystem struct { // Data List of all file stores. Data []DataPathStats `json:"data,omitempty"` @@ -100,3 +100,5 @@ func NewFileSystem() *FileSystem { return r } + +// false diff --git a/typedapi/types/filesystemtotal.go b/typedapi/types/filesystemtotal.go index 91064019fe..0dc9f75a95 100644 --- a/typedapi/types/filesystemtotal.go +++ b/typedapi/types/filesystemtotal.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FileSystemTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L828-L857 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L828-L857 type FileSystemTotal struct { // Available Total disk space available to this Java virtual machine on all file stores. // Depending on OS or process level restrictions, this might appear less than @@ -163,3 +163,5 @@ func NewFileSystemTotal() *FileSystemTotal { return r } + +// false diff --git a/typedapi/types/fillmaskinferenceoptions.go b/typedapi/types/fillmaskinferenceoptions.go index 86ad267e5a..76ed1d6985 100644 --- a/typedapi/types/fillmaskinferenceoptions.go +++ b/typedapi/types/fillmaskinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FillMaskInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L266-L280 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L253-L268 type FillMaskInferenceOptions struct { // MaskToken The string/token which will be removed from incoming documents and replaced // with the inference prediction(s). @@ -50,6 +50,7 @@ type FillMaskInferenceOptions struct { ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options to update when inferring Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { @@ -112,6 +113,11 @@ func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -123,3 +129,13 @@ func NewFillMaskInferenceOptions() *FillMaskInferenceOptions { return r } + +// true + +type FillMaskInferenceOptionsVariant interface { + FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions +} + +func (s *FillMaskInferenceOptions) FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions { + return s +} diff --git a/typedapi/types/fillmaskinferenceupdateoptions.go b/typedapi/types/fillmaskinferenceupdateoptions.go index 41709c1d98..8eea8d66b8 100644 --- 
a/typedapi/types/fillmaskinferenceupdateoptions.go +++ b/typedapi/types/fillmaskinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FillMaskInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L411-L418 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L399-L406 type FillMaskInferenceUpdateOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -101,3 +101,13 @@ func NewFillMaskInferenceUpdateOptions() *FillMaskInferenceUpdateOptions { return r } + +// true + +type FillMaskInferenceUpdateOptionsVariant interface { + FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions +} + +func (s *FillMaskInferenceUpdateOptions) FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/filteraggregate.go b/typedapi/types/filteraggregate.go index d10bbe2047..dddb7ae787 100644 --- a/typedapi/types/filteraggregate.go +++ b/typedapi/types/filteraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // FilterAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L552-L556 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L552-L556 type FilterAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s FilterAggregate) MarshalJSON() ([]byte, error) { // NewFilterAggregate returns a FilterAggregate. func NewFilterAggregate() *FilterAggregate { r := &FilterAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/filteringadvancedsnippet.go b/typedapi/types/filteringadvancedsnippet.go index 2ea3688961..80cb80c031 100644 --- a/typedapi/types/filteringadvancedsnippet.go +++ b/typedapi/types/filteringadvancedsnippet.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FilteringAdvancedSnippet type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L192-L196 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L192-L196 type FilteringAdvancedSnippet struct { CreatedAt DateTime `json:"created_at,omitempty"` UpdatedAt DateTime `json:"updated_at,omitempty"` @@ -78,3 +78,13 @@ func NewFilteringAdvancedSnippet() *FilteringAdvancedSnippet { return r } + +// true + +type FilteringAdvancedSnippetVariant interface { + FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet +} + +func (s *FilteringAdvancedSnippet) FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet { + return s +} diff --git a/typedapi/types/filteringconfig.go b/typedapi/types/filteringconfig.go index b8bee32e85..14e315f0f6 100644 --- a/typedapi/types/filteringconfig.go +++ b/typedapi/types/filteringconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FilteringConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L209-L213 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L209-L213 type FilteringConfig struct { Active FilteringRules `json:"active"` Domain *string `json:"domain,omitempty"` @@ -86,3 +86,13 @@ func NewFilteringConfig() *FilteringConfig { return r } + +// true + +type FilteringConfigVariant interface { + FilteringConfigCaster() *FilteringConfig +} + +func (s *FilteringConfig) FilteringConfigCaster() *FilteringConfig { + return s +} diff --git a/typedapi/types/filteringrule.go b/typedapi/types/filteringrule.go index 42b37a903c..b2b8d292d6 100644 --- a/typedapi/types/filteringrule.go +++ b/typedapi/types/filteringrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // FilteringRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L170-L179 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L170-L179 type FilteringRule struct { CreatedAt DateTime `json:"created_at,omitempty"` Field string `json:"field"` @@ -130,3 +130,13 @@ func NewFilteringRule() *FilteringRule { return r } + +// true + +type FilteringRuleVariant interface { + FilteringRuleCaster() *FilteringRule +} + +func (s *FilteringRule) FilteringRuleCaster() *FilteringRule { + return s +} diff --git a/typedapi/types/filteringrules.go b/typedapi/types/filteringrules.go index 4010ffa59b..ac36a3d4ed 100644 --- a/typedapi/types/filteringrules.go +++ b/typedapi/types/filteringrules.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // FilteringRules type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L203-L207 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L203-L207 type FilteringRules struct { AdvancedSnippet FilteringAdvancedSnippet `json:"advanced_snippet"` Rules []FilteringRule `json:"rules"` @@ -35,3 +35,13 @@ func NewFilteringRules() *FilteringRules { return r } + +// true + +type FilteringRulesVariant interface { + FilteringRulesCaster() *FilteringRules +} + +func (s *FilteringRules) FilteringRulesCaster() *FilteringRules { + return s +} diff --git a/typedapi/types/filteringrulesvalidation.go b/typedapi/types/filteringrulesvalidation.go index 363060a02a..5af3620f45 100644 --- a/typedapi/types/filteringrulesvalidation.go +++ b/typedapi/types/filteringrulesvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // FilteringRulesValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L198-L201 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L198-L201 type FilteringRulesValidation struct { Errors []FilteringValidation `json:"errors"` State filteringvalidationstate.FilteringValidationState `json:"state"` @@ -38,3 +38,13 @@ func NewFilteringRulesValidation() *FilteringRulesValidation { return r } + +// true + +type FilteringRulesValidationVariant interface { + FilteringRulesValidationCaster() *FilteringRulesValidation +} + +func (s *FilteringRulesValidation) FilteringRulesValidationCaster() *FilteringRulesValidation { + return s +} diff --git a/typedapi/types/filteringvalidation.go b/typedapi/types/filteringvalidation.go index 45cb83b64e..0704e1c320 100644 --- a/typedapi/types/filteringvalidation.go +++ b/typedapi/types/filteringvalidation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // FilteringValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L181-L184 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L181-L184 type FilteringValidation struct { Ids []string `json:"ids"` Messages []string `json:"messages"` @@ -34,3 +34,13 @@ func NewFilteringValidation() *FilteringValidation { return r } + +// true + +type FilteringValidationVariant interface { + FilteringValidationCaster() *FilteringValidation +} + +func (s *FilteringValidation) FilteringValidationCaster() *FilteringValidation { + return s +} diff --git a/typedapi/types/filterref.go b/typedapi/types/filterref.go index 65e87c59db..680b3f8707 100644 --- a/typedapi/types/filterref.go +++ b/typedapi/types/filterref.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // FilterRef type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Filter.ts#L31-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Filter.ts#L31-L41 type FilterRef struct { // FilterId The identifier for the filter. 
FilterId string `json:"filter_id"` @@ -77,3 +77,13 @@ func NewFilterRef() *FilterRef { return r } + +// true + +type FilterRefVariant interface { + FilterRefCaster() *FilterRef +} + +func (s *FilterRef) FilterRefCaster() *FilterRef { + return s +} diff --git a/typedapi/types/filtersaggregate.go b/typedapi/types/filtersaggregate.go index 7e2ec50829..a752db6d4d 100644 --- a/typedapi/types/filtersaggregate.go +++ b/typedapi/types/filtersaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FiltersAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L639-L643 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L639-L643 type FiltersAggregate struct { Buckets BucketsFiltersBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewFiltersAggregate() *FiltersAggregate { return r } + +// false diff --git a/typedapi/types/filtersaggregation.go b/typedapi/types/filtersaggregation.go index e3fd73ded1..20c004938a 100644 --- a/typedapi/types/filtersaggregation.go +++ b/typedapi/types/filtersaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FiltersAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L374-L394 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L374-L394 type FiltersAggregation struct { // Filters Collection of queries from which to build buckets. Filters BucketsQuery `json:"filters,omitempty"` @@ -132,3 +132,13 @@ func NewFiltersAggregation() *FiltersAggregation { return r } + +// true + +type FiltersAggregationVariant interface { + FiltersAggregationCaster() *FiltersAggregation +} + +func (s *FiltersAggregation) FiltersAggregationCaster() *FiltersAggregation { + return s +} diff --git a/typedapi/types/filtersbucket.go b/typedapi/types/filtersbucket.go index 426a6f79a2..d4955dbe78 100644 --- a/typedapi/types/filtersbucket.go +++ b/typedapi/types/filtersbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // FiltersBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L645-L645 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L645-L645 type FiltersBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -625,8 +625,10 @@ func (s FiltersBucket) MarshalJSON() ([]byte, error) { // NewFiltersBucket returns a FiltersBucket. 
func NewFiltersBucket() *FiltersBucket { r := &FiltersBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/fingerprintanalyzer.go b/typedapi/types/fingerprintanalyzer.go index 82cc8f1a04..beede652ed 100644 --- a/typedapi/types/fingerprintanalyzer.go +++ b/typedapi/types/fingerprintanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,23 @@ import ( // FingerprintAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L37-L64 type FingerprintAnalyzer struct { - MaxOutputSize int `json:"max_output_size"` - PreserveOriginal bool `json:"preserve_original"` - Separator string `json:"separator"` - Stopwords []string `json:"stopwords,omitempty"` - StopwordsPath *string `json:"stopwords_path,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + // MaxOutputSize The maximum token size to emit. Tokens larger than this size will be + // discarded. + // Defaults to `255` + MaxOutputSize *int `json:"max_output_size,omitempty"` + // Separator The character to use to concatenate the terms. + // Defaults to a space. + Separator *string `json:"separator,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. 
+ Stopwords []string `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` } func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { @@ -67,24 +75,10 @@ func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxOutputSize", err) } - s.MaxOutputSize = value + s.MaxOutputSize = &value case float64: f := int(v) - s.MaxOutputSize = f - } - - case "preserve_original": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "PreserveOriginal", err) - } - s.PreserveOriginal = value - case bool: - s.PreserveOriginal = v + s.MaxOutputSize = &f } case "separator": @@ -97,7 +91,7 @@ func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Separator = o + s.Separator = &o case "stopwords": rawMsg := json.RawMessage{} @@ -146,13 +140,12 @@ func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { func (s FingerprintAnalyzer) MarshalJSON() ([]byte, error) { type innerFingerprintAnalyzer FingerprintAnalyzer tmp := innerFingerprintAnalyzer{ - MaxOutputSize: s.MaxOutputSize, - PreserveOriginal: s.PreserveOriginal, - Separator: s.Separator, - Stopwords: s.Stopwords, - StopwordsPath: s.StopwordsPath, - Type: s.Type, - Version: s.Version, + MaxOutputSize: s.MaxOutputSize, + Separator: s.Separator, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, } tmp.Type = "fingerprint" @@ -166,3 +159,13 @@ func NewFingerprintAnalyzer() *FingerprintAnalyzer { return r } + +// true + +type FingerprintAnalyzerVariant interface { + FingerprintAnalyzerCaster() *FingerprintAnalyzer +} + +func (s *FingerprintAnalyzer) FingerprintAnalyzerCaster() 
*FingerprintAnalyzer { + return s +} diff --git a/typedapi/types/fingerprintprocessor.go b/typedapi/types/fingerprintprocessor.go index 61e81dc1fa..81b5b31bb5 100644 --- a/typedapi/types/fingerprintprocessor.go +++ b/typedapi/types/fingerprintprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // FingerprintProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L864-L892 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L905-L933 type FingerprintProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -43,7 +43,7 @@ type FingerprintProcessor struct { // only the field value. Fields []string `json:"fields"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If true, the processor ignores any missing fields. 
If all fields are @@ -107,16 +107,9 @@ func (s *FingerprintProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -196,3 +189,13 @@ func NewFingerprintProcessor() *FingerprintProcessor { return r } + +// true + +type FingerprintProcessorVariant interface { + FingerprintProcessorCaster() *FingerprintProcessor +} + +func (s *FingerprintProcessor) FingerprintProcessorCaster() *FingerprintProcessor { + return s +} diff --git a/typedapi/types/fingerprinttokenfilter.go b/typedapi/types/fingerprinttokenfilter.go index 8f40d16f47..15fdfd2ec0 100644 --- a/typedapi/types/fingerprinttokenfilter.go +++ b/typedapi/types/fingerprinttokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FingerprintTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L195-L199 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L195-L199 type FingerprintTokenFilter struct { MaxOutputSize *int `json:"max_output_size,omitempty"` Separator *string `json:"separator,omitempty"` @@ -118,3 +118,13 @@ func NewFingerprintTokenFilter() *FingerprintTokenFilter { return r } + +// true + +type FingerprintTokenFilterVariant interface { + FingerprintTokenFilterCaster() *FingerprintTokenFilter +} + +func (s *FingerprintTokenFilter) FingerprintTokenFilterCaster() *FingerprintTokenFilter { + return s +} diff --git a/typedapi/types/finnishanalyzer.go b/typedapi/types/finnishanalyzer.go index e9dc949f94..1ace9d7f8e 100644 --- a/typedapi/types/finnishanalyzer.go +++ b/typedapi/types/finnishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FinnishAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L154-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L165-L170 type FinnishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewFinnishAnalyzer() *FinnishAnalyzer { return r } + +// true + +type FinnishAnalyzerVariant interface { + FinnishAnalyzerCaster() *FinnishAnalyzer +} + +func (s *FinnishAnalyzer) FinnishAnalyzerCaster() *FinnishAnalyzer { + return s +} diff --git a/typedapi/types/flattened.go b/typedapi/types/flattened.go index a8d8e2fb98..67af5d9785 100644 --- a/typedapi/types/flattened.go +++ b/typedapi/types/flattened.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Flattened type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L354-L356 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L366-L368 type Flattened struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -108,3 +108,5 @@ func NewFlattened() *Flattened { return r } + +// false diff --git a/typedapi/types/flattenedproperty.go b/typedapi/types/flattenedproperty.go index a83c9d3257..164ed8f930 100644 --- a/typedapi/types/flattenedproperty.go +++ b/typedapi/types/flattenedproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FlattenedProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/complex.ts#L25-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/complex.ts#L26-L37 type FlattenedProperty struct { Boost *Float64 `json:"boost,omitempty"` DepthLimit *int `json:"depth_limit,omitempty"` @@ -46,12 +47,13 @@ type FlattenedProperty struct { Index *bool `json:"index,omitempty"` IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { @@ -153,301 +155,313 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := 
NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := 
NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -527,301 +541,313 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -853,6 +879,11 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { s.SplitQueriesOnWhitespace = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -881,6 +912,7 @@ func (s FlattenedProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Similarity: s.Similarity, SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -892,10 +924,20 @@ func (s FlattenedProperty) MarshalJSON() ([]byte, error) { // NewFlattenedProperty returns a FlattenedProperty. 
func NewFlattenedProperty() *FlattenedProperty { r := &FlattenedProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FlattenedPropertyVariant interface { + FlattenedPropertyCaster() *FlattenedProperty +} + +func (s *FlattenedProperty) FlattenedPropertyCaster() *FlattenedProperty { + return s +} diff --git a/typedapi/types/floatnumberproperty.go b/typedapi/types/floatnumberproperty.go index 73ea535f9b..708e841a6d 100644 --- a/typedapi/types/floatnumberproperty.go +++ b/typedapi/types/floatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // FloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L142-L145 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L146-L149 type FloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type FloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *float32 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *float32 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -553,301 +567,313 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -872,6 +898,11 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -920,6 +951,7 @@ func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -933,10 +965,20 @@ func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { // NewFloatNumberProperty returns a FloatNumberProperty. func NewFloatNumberProperty() *FloatNumberProperty { r := &FloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FloatNumberPropertyVariant interface { + FloatNumberPropertyCaster() *FloatNumberProperty +} + +func (s *FloatNumberProperty) FloatNumberPropertyCaster() *FloatNumberProperty { + return s +} diff --git a/typedapi/types/floatrangeproperty.go b/typedapi/types/floatrangeproperty.go index 36a7e58ac4..be40a1dd5e 100644 --- a/typedapi/types/floatrangeproperty.go +++ b/typedapi/types/floatrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FloatRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L38-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L38-L40 type FloatRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,10 +45,11 @@ type FloatRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { @@ -149,301 +151,313 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -506,301 +520,313 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -820,6 +846,11 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -834,18 +865,19 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { type innerFloatRangeProperty FloatRangeProperty tmp := innerFloatRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "float_range" @@ -856,10 +888,20 @@ func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { // NewFloatRangeProperty returns a FloatRangeProperty. 
func NewFloatRangeProperty() *FloatRangeProperty { r := &FloatRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FloatRangePropertyVariant interface { + FloatRangePropertyCaster() *FloatRangeProperty +} + +func (s *FloatRangeProperty) FloatRangePropertyCaster() *FloatRangeProperty { + return s +} diff --git a/typedapi/types/flushstats.go b/typedapi/types/flushstats.go index a4b3df77a6..9529f085d5 100644 --- a/typedapi/types/flushstats.go +++ b/typedapi/types/flushstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FlushStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L123-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L126-L131 type FlushStats struct { Periodic int64 `json:"periodic"` Total int64 `json:"total"` @@ -105,3 +105,5 @@ func NewFlushStats() *FlushStats { return r } + +// false diff --git a/typedapi/types/followerindex.go b/typedapi/types/followerindex.go index 561829b4ae..5322874380 100644 --- a/typedapi/types/followerindex.go +++ b/typedapi/types/followerindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,13 +32,19 @@ import ( // FollowerIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow_info/types.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow_info/types.ts#L24-L35 type FollowerIndex struct { - FollowerIndex string `json:"follower_index"` - LeaderIndex string `json:"leader_index"` - Parameters *FollowerIndexParameters `json:"parameters,omitempty"` - RemoteCluster string `json:"remote_cluster"` - Status followerindexstatus.FollowerIndexStatus `json:"status"` + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // LeaderIndex The name of the index in the leader cluster that is followed. + LeaderIndex string `json:"leader_index"` + // Parameters An object that encapsulates cross-cluster replication parameters. If the + // follower index's status is paused, this object is omitted. + Parameters *FollowerIndexParameters `json:"parameters,omitempty"` + // RemoteCluster The remote cluster that contains the leader index. + RemoteCluster string `json:"remote_cluster"` + // Status The status of the index following: `active` or `paused`. + Status followerindexstatus.FollowerIndexStatus `json:"status"` } func (s *FollowerIndex) UnmarshalJSON(data []byte) error { @@ -92,3 +98,5 @@ func NewFollowerIndex() *FollowerIndex { return r } + +// false diff --git a/typedapi/types/followerindexparameters.go b/typedapi/types/followerindexparameters.go index 668e332f3f..be01bbb463 100644 --- a/typedapi/types/followerindexparameters.go +++ b/typedapi/types/followerindexparameters.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FollowerIndexParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/follow_info/types.ts#L37-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/follow_info/types.ts#L42-L88 type FollowerIndexParameters struct { // MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` @@ -198,3 +198,5 @@ func NewFollowerIndexParameters() *FollowerIndexParameters { return r } + +// false diff --git a/typedapi/types/followindexstats.go b/typedapi/types/followindexstats.go index 8cf83c1bec..c075a22411 100644 --- a/typedapi/types/followindexstats.go +++ b/typedapi/types/followindexstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,9 +30,11 @@ import ( // FollowIndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/_types/FollowIndexStats.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/_types/FollowIndexStats.ts#L30-L35 type FollowIndexStats struct { - Index string `json:"index"` + // Index The name of the follower index. 
+ Index string `json:"index"` + // Shards An array of shard-level following task statistics. Shards []CcrShardStats `json:"shards"` } @@ -72,3 +74,5 @@ func NewFollowIndexStats() *FollowIndexStats { return r } + +// false diff --git a/typedapi/types/followstats.go b/typedapi/types/followstats.go index 10d16ca8b5..3c59395789 100644 --- a/typedapi/types/followstats.go +++ b/typedapi/types/followstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // FollowStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/stats/types.ts.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/stats/types.ts.ts#L49-L51 type FollowStats struct { Indices []FollowIndexStats `json:"indices"` } @@ -33,3 +33,5 @@ func NewFollowStats() *FollowStats { return r } + +// false diff --git a/typedapi/types/forcemergeaction.go b/typedapi/types/forcemergeaction.go index 9aabab5fa3..2d7b34d5f5 100644 --- a/typedapi/types/forcemergeaction.go +++ b/typedapi/types/forcemergeaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ForceMergeAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L126-L129 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L123-L126 type ForceMergeAction struct { IndexCodec *string `json:"index_codec,omitempty"` MaxNumSegments int `json:"max_num_segments"` @@ -91,3 +91,13 @@ func NewForceMergeAction() *ForceMergeAction { return r } + +// true + +type ForceMergeActionVariant interface { + ForceMergeActionCaster() *ForceMergeAction +} + +func (s *ForceMergeAction) ForceMergeActionCaster() *ForceMergeAction { + return s +} diff --git a/typedapi/types/foreachprocessor.go b/typedapi/types/foreachprocessor.go index 12df019278..9bd5098a46 100644 --- a/typedapi/types/foreachprocessor.go +++ b/typedapi/types/foreachprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ForeachProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L894-L908 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L935-L949 type ForeachProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type ForeachProcessor struct { // Field Field containing array or object values. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true`, the processor silently exits without changing the document if the @@ -48,7 +48,7 @@ type ForeachProcessor struct { // OnFailure Handle failures for the processor. OnFailure []ProcessorContainer `json:"on_failure,omitempty"` // Processor Ingest processor to run on each element. - Processor *ProcessorContainer `json:"processor,omitempty"` + Processor ProcessorContainer `json:"processor"` // Tag Identifier for the processor. // Useful for debugging and metrics. Tag *string `json:"tag,omitempty"` @@ -87,16 +87,9 @@ func (s *ForeachProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -159,3 +152,13 @@ func NewForeachProcessor() *ForeachProcessor { return r } + +// true + +type ForeachProcessorVariant interface { + ForeachProcessorCaster() *ForeachProcessor +} + +func (s *ForeachProcessor) ForeachProcessorCaster() *ForeachProcessor { + return s +} diff --git a/typedapi/types/formattablemetricaggregation.go b/typedapi/types/formattablemetricaggregation.go deleted file mode 100644 index 602d3d5eee..0000000000 --- a/typedapi/types/formattablemetricaggregation.go +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// FormattableMetricAggregation type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L51-L53 -type FormattableMetricAggregation struct { - // Field The field on which to run the aggregation. - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - // Missing The value to apply to documents that do not have a value. - // By default, documents without a value are ignored. 
- Missing Missing `json:"missing,omitempty"` - Script *Script `json:"script,omitempty"` -} - -func (s *FormattableMetricAggregation) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "format": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Format", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Format = &o - - case "missing": - if err := dec.Decode(&s.Missing); err != nil { - return fmt.Errorf("%s | %w", "Missing", err) - } - - case "script": - if err := dec.Decode(&s.Script); err != nil { - return fmt.Errorf("%s | %w", "Script", err) - } - - } - } - return nil -} - -// NewFormattableMetricAggregation returns a FormattableMetricAggregation. -func NewFormattableMetricAggregation() *FormattableMetricAggregation { - r := &FormattableMetricAggregation{} - - return r -} diff --git a/typedapi/types/foundstatus.go b/typedapi/types/foundstatus.go index 623ddaf0e3..8efd9761b0 100644 --- a/typedapi/types/foundstatus.go +++ b/typedapi/types/foundstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FoundStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/delete_privileges/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/delete_privileges/types.ts#L20-L22 type FoundStatus struct { Found bool `json:"found"` } @@ -76,3 +76,5 @@ func NewFoundStatus() *FoundStatus { return r } + +// false diff --git a/typedapi/types/frenchanalyzer.go b/typedapi/types/frenchanalyzer.go index 4bd7417de5..8a06a2b68c 100644 --- a/typedapi/types/frenchanalyzer.go +++ b/typedapi/types/frenchanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FrenchAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L161-L166 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L172-L177 type FrenchAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewFrenchAnalyzer() *FrenchAnalyzer { return r } + +// true + +type FrenchAnalyzerVariant interface { + FrenchAnalyzerCaster() *FrenchAnalyzer +} + +func (s *FrenchAnalyzer) FrenchAnalyzerCaster() *FrenchAnalyzer { + return s +} diff --git a/typedapi/types/frequencyencodingpreprocessor.go b/typedapi/types/frequencyencodingpreprocessor.go index adf5dd233a..4965b36731 100644 --- a/typedapi/types/frequencyencodingpreprocessor.go +++ b/typedapi/types/frequencyencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FrequencyEncodingPreprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L38-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L38-L42 type FrequencyEncodingPreprocessor struct { FeatureName string `json:"feature_name"` Field string `json:"field"` @@ -93,8 +93,18 @@ func (s *FrequencyEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewFrequencyEncodingPreprocessor returns a FrequencyEncodingPreprocessor. 
func NewFrequencyEncodingPreprocessor() *FrequencyEncodingPreprocessor { r := &FrequencyEncodingPreprocessor{ - FrequencyMap: make(map[string]Float64, 0), + FrequencyMap: make(map[string]Float64), } return r } + +// true + +type FrequencyEncodingPreprocessorVariant interface { + FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor +} + +func (s *FrequencyEncodingPreprocessor) FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor { + return s +} diff --git a/typedapi/types/frequentitemsetsaggregate.go b/typedapi/types/frequentitemsetsaggregate.go index 13ecd4722d..2662c9b7e5 100644 --- a/typedapi/types/frequentitemsetsaggregate.go +++ b/typedapi/types/frequentitemsetsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FrequentItemSetsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L722-L723 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L722-L723 type FrequentItemSetsAggregate struct { Buckets BucketsFrequentItemSetsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewFrequentItemSetsAggregate() *FrequentItemSetsAggregate { return r } + +// false diff --git a/typedapi/types/frequentitemsetsaggregation.go b/typedapi/types/frequentitemsetsaggregation.go index 28800dfc23..7ecfea985c 100644 --- a/typedapi/types/frequentitemsetsaggregation.go +++ b/typedapi/types/frequentitemsetsaggregation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FrequentItemSetsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1240-L1267 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1240-L1267 type FrequentItemSetsAggregation struct { // Fields Fields to analyze. Fields []FrequentItemSetsField `json:"fields"` @@ -129,3 +129,13 @@ func NewFrequentItemSetsAggregation() *FrequentItemSetsAggregation { return r } + +// true + +type FrequentItemSetsAggregationVariant interface { + FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation +} + +func (s *FrequentItemSetsAggregation) FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation { + return s +} diff --git a/typedapi/types/frequentitemsetsbucket.go b/typedapi/types/frequentitemsetsbucket.go index 25d05425e1..1fca79194b 100644 --- a/typedapi/types/frequentitemsetsbucket.go +++ b/typedapi/types/frequentitemsetsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // FrequentItemSetsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L725-L728 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L725-L728 type FrequentItemSetsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -651,9 +651,11 @@ func (s FrequentItemSetsBucket) MarshalJSON() ([]byte, error) { // NewFrequentItemSetsBucket returns a FrequentItemSetsBucket. func NewFrequentItemSetsBucket() *FrequentItemSetsBucket { r := &FrequentItemSetsBucket{ - Aggregations: make(map[string]Aggregate, 0), - Key: make(map[string][]string, 0), + Aggregations: make(map[string]Aggregate), + Key: make(map[string][]string), } return r } + +// false diff --git a/typedapi/types/frequentitemsetsfield.go b/typedapi/types/frequentitemsetsfield.go index 092d3b26e5..7798eb199d 100644 --- a/typedapi/types/frequentitemsetsfield.go +++ b/typedapi/types/frequentitemsetsfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // FrequentItemSetsField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1226-L1238 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1226-L1238 type FrequentItemSetsField struct { // Exclude Values to exclude. // Can be regular expression strings or arrays of strings of exact terms. 
@@ -124,3 +124,13 @@ func NewFrequentItemSetsField() *FrequentItemSetsField { return r } + +// true + +type FrequentItemSetsFieldVariant interface { + FrequentItemSetsFieldCaster() *FrequentItemSetsField +} + +func (s *FrequentItemSetsField) FrequentItemSetsFieldCaster() *FrequentItemSetsField { + return s +} diff --git a/typedapi/types/functionscore.go b/typedapi/types/functionscore.go index db48ab4567..c636035b66 100644 --- a/typedapi/types/functionscore.go +++ b/typedapi/types/functionscore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,9 @@ import ( // FunctionScore type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L226-L266 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L226-L266 type FunctionScore struct { + AdditionalFunctionScoreProperty map[string]json.RawMessage `json:"-"` // Exp Function that scores a document with a exponential decay, depending on the // distance of a numeric field value of the document from an origin. 
Exp DecayFunction `json:"exp,omitempty"` @@ -152,14 +153,68 @@ func (s *FunctionScore) UnmarshalJSON(data []byte) error { s.Weight = &f } + default: + + if key, ok := t.(string); ok { + if s.AdditionalFunctionScoreProperty == nil { + s.AdditionalFunctionScoreProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFunctionScoreProperty", err) + } + s.AdditionalFunctionScoreProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s FunctionScore) MarshalJSON() ([]byte, error) { + type opt FunctionScore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFunctionScoreProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFunctionScoreProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewFunctionScore returns a FunctionScore. func NewFunctionScore() *FunctionScore { - r := &FunctionScore{} + r := &FunctionScore{ + AdditionalFunctionScoreProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type FunctionScoreVariant interface { + FunctionScoreCaster() *FunctionScore +} + +func (s *FunctionScore) FunctionScoreCaster() *FunctionScore { + return s +} diff --git a/typedapi/types/functionscorequery.go b/typedapi/types/functionscorequery.go index e63e43d0a3..ba8fa39b4e 100644 --- a/typedapi/types/functionscorequery.go +++ b/typedapi/types/functionscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // FunctionScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L105-L135 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L105-L135 type FunctionScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -169,3 +169,13 @@ func NewFunctionScoreQuery() *FunctionScoreQuery { return r } + +// true + +type FunctionScoreQueryVariant interface { + FunctionScoreQueryCaster() *FunctionScoreQuery +} + +func (s *FunctionScoreQuery) FunctionScoreQueryCaster() *FunctionScoreQuery { + return s +} diff --git a/typedapi/types/fuzziness.go b/typedapi/types/fuzziness.go index aa3e768bbf..64969a7bab 100644 --- a/typedapi/types/fuzziness.go +++ b/typedapi/types/fuzziness.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L134-L135 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L130-L131 type Fuzziness any + +type FuzzinessVariant interface { + FuzzinessCaster() *Fuzziness +} diff --git a/typedapi/types/fuzzyquery.go b/typedapi/types/fuzzyquery.go index e0f62c6c55..8963dda610 100644 --- a/typedapi/types/fuzzyquery.go +++ b/typedapi/types/fuzzyquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // FuzzyQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L48-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L48-L86 type FuzzyQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -189,3 +189,13 @@ func NewFuzzyQuery() *FuzzyQuery { return r } + +// true + +type FuzzyQueryVariant interface { + FuzzyQueryCaster() *FuzzyQuery +} + +func (s *FuzzyQuery) FuzzyQueryCaster() *FuzzyQuery { + return s +} diff --git a/typedapi/types/galiciananalyzer.go b/typedapi/types/galiciananalyzer.go index a0d7ef302e..131e49d9a4 100644 --- a/typedapi/types/galiciananalyzer.go +++ b/typedapi/types/galiciananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GalicianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L168-L173 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L179-L184 type GalicianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewGalicianAnalyzer() *GalicianAnalyzer { return r } + +// true + +type GalicianAnalyzerVariant interface { + GalicianAnalyzerCaster() *GalicianAnalyzer +} + +func (s *GalicianAnalyzer) GalicianAnalyzerCaster() *GalicianAnalyzer { + return s +} diff --git a/typedapi/types/garbagecollector.go b/typedapi/types/garbagecollector.go index 98d5d85d02..ca597bf924 100644 --- a/typedapi/types/garbagecollector.go +++ b/typedapi/types/garbagecollector.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // GarbageCollector type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L994-L999 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L994-L999 type GarbageCollector struct { // Collectors Contains statistics about JVM garbage collectors for the node. Collectors map[string]GarbageCollectorTotal `json:"collectors,omitempty"` @@ -31,8 +31,10 @@ type GarbageCollector struct { // NewGarbageCollector returns a GarbageCollector. func NewGarbageCollector() *GarbageCollector { r := &GarbageCollector{ - Collectors: make(map[string]GarbageCollectorTotal, 0), + Collectors: make(map[string]GarbageCollectorTotal), } return r } + +// false diff --git a/typedapi/types/garbagecollectortotal.go b/typedapi/types/garbagecollectortotal.go index c119407535..715d9ce827 100644 --- a/typedapi/types/garbagecollectortotal.go +++ b/typedapi/types/garbagecollectortotal.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GarbageCollectorTotal type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1001-L1014 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1001-L1014 type GarbageCollectorTotal struct { // CollectionCount Total number of JVM garbage collectors that collect objects. CollectionCount *int64 `json:"collection_count,omitempty"` @@ -109,3 +109,5 @@ func NewGarbageCollectorTotal() *GarbageCollectorTotal { return r } + +// false diff --git a/typedapi/types/gcsrepository.go b/typedapi/types/gcsrepository.go index f133e623a1..4b927c8ead 100644 --- a/typedapi/types/gcsrepository.go +++ b/typedapi/types/gcsrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,13 @@ import ( // GcsRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L45-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L52-L62 type GcsRepository struct { + // Settings The repository settings. Settings GcsRepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Type The Google Cloud Storage repository type. 
+ Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *GcsRepository) UnmarshalJSON(data []byte) error { @@ -92,3 +94,13 @@ func NewGcsRepository() *GcsRepository { return r } + +// true + +type GcsRepositoryVariant interface { + GcsRepositoryCaster() *GcsRepository +} + +func (s *GcsRepository) GcsRepositoryCaster() *GcsRepository { + return s +} diff --git a/typedapi/types/gcsrepositorysettings.go b/typedapi/types/gcsrepositorysettings.go index 9ef7f4e433..0bc5f7a5ca 100644 --- a/typedapi/types/gcsrepositorysettings.go +++ b/typedapi/types/gcsrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,17 +31,63 @@ import ( // GcsRepositorySettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L85-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L198-L235 type GcsRepositorySettings struct { - ApplicationName *string `json:"application_name,omitempty"` - BasePath *string `json:"base_path,omitempty"` - Bucket string `json:"bucket"` - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Client *string `json:"client,omitempty"` - Compress *bool `json:"compress,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // ApplicationName The name used by the client when it uses the Google Cloud Storage service. + ApplicationName *string `json:"application_name,omitempty"` + // BasePath The path to the repository data within the bucket. + // It defaults to the root of the bucket. 
+ // + // NOTE: Don't set `base_path` when configuring a snapshot repository for + // Elastic Cloud Enterprise. + // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments can share the same bucket. + BasePath *string `json:"base_path,omitempty"` + // Bucket The name of the bucket to be used for snapshots. + Bucket string `json:"bucket"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Client The name of the client to use to connect to Google Cloud Storage. + Client *string `json:"client,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. 
MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - Readonly *bool `json:"readonly,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. + Readonly *bool `json:"readonly,omitempty"` } func (s *GcsRepositorySettings) UnmarshalJSON(data []byte) error { @@ -161,3 +207,13 @@ func NewGcsRepositorySettings() *GcsRepositorySettings { return r } + +// true + +type GcsRepositorySettingsVariant interface { + GcsRepositorySettingsCaster() *GcsRepositorySettings +} + +func (s *GcsRepositorySettings) GcsRepositorySettingsCaster() *GcsRepositorySettings { + return s +} diff --git a/typedapi/types/geoboundingboxquery.go b/typedapi/types/geoboundingboxquery.go index 45b22931f4..2405b49a0e 100644 --- a/typedapi/types/geoboundingboxquery.go +++ b/typedapi/types/geoboundingboxquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // GeoBoundingBoxQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L32-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L35-L57 type GeoBoundingBoxQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -172,8 +172,18 @@ func (s GeoBoundingBoxQuery) MarshalJSON() ([]byte, error) { // NewGeoBoundingBoxQuery returns a GeoBoundingBoxQuery. func NewGeoBoundingBoxQuery() *GeoBoundingBoxQuery { r := &GeoBoundingBoxQuery{ - GeoBoundingBoxQuery: make(map[string]GeoBounds, 0), + GeoBoundingBoxQuery: make(map[string]GeoBounds), } return r } + +// true + +type GeoBoundingBoxQueryVariant interface { + GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery +} + +func (s *GeoBoundingBoxQuery) GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery { + return s +} diff --git a/typedapi/types/geobounds.go b/typedapi/types/geobounds.go index 955e0ffb31..adf1a1ee8a 100644 --- a/typedapi/types/geobounds.go +++ b/typedapi/types/geobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,5 +27,9 @@ package types // TopRightBottomLeftGeoBounds // WktGeoBounds // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L135-L148 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L135-L148 type GeoBounds any + +type GeoBoundsVariant interface { + GeoBoundsCaster() *GeoBounds +} diff --git a/typedapi/types/geoboundsaggregate.go b/typedapi/types/geoboundsaggregate.go index 0d686516c9..b9cc3be51e 100644 --- a/typedapi/types/geoboundsaggregate.go +++ b/typedapi/types/geoboundsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoBoundsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L327-L333 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L327-L333 type GeoBoundsAggregate struct { Bounds GeoBounds `json:"bounds,omitempty"` Meta Metadata `json:"meta,omitempty"` @@ -130,3 +130,5 @@ func NewGeoBoundsAggregate() *GeoBoundsAggregate { return r } + +// false diff --git a/typedapi/types/geoboundsaggregation.go b/typedapi/types/geoboundsaggregation.go index 55627c4d39..7d95c9c45d 100644 --- a/typedapi/types/geoboundsaggregation.go +++ b/typedapi/types/geoboundsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoBoundsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L108-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L108-L117 type GeoBoundsAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -99,3 +99,13 @@ func NewGeoBoundsAggregation() *GeoBoundsAggregation { return r } + +// true + +type GeoBoundsAggregationVariant interface { + GeoBoundsAggregationCaster() *GeoBoundsAggregation +} + +func (s *GeoBoundsAggregation) GeoBoundsAggregationCaster() *GeoBoundsAggregation { + return s +} diff --git a/typedapi/types/geocentroidaggregate.go b/typedapi/types/geocentroidaggregate.go index c02f8ef7fd..5b37805665 100644 --- a/typedapi/types/geocentroidaggregate.go +++ b/typedapi/types/geocentroidaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoCentroidAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L335-L342 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L335-L342 type GeoCentroidAggregate struct { Count int64 `json:"count"` Location GeoLocation `json:"location,omitempty"` @@ -129,3 +129,5 @@ func NewGeoCentroidAggregate() *GeoCentroidAggregate { return r } + +// false diff --git a/typedapi/types/geocentroidaggregation.go b/typedapi/types/geocentroidaggregation.go index 8ce28c1947..7ed230cfc8 100644 --- a/typedapi/types/geocentroidaggregation.go +++ b/typedapi/types/geocentroidaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoCentroidAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L119-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L119-L122 type GeoCentroidAggregation struct { Count *int64 `json:"count,omitempty"` // Field The field on which to run the aggregation. @@ -144,3 +144,13 @@ func NewGeoCentroidAggregation() *GeoCentroidAggregation { return r } + +// true + +type GeoCentroidAggregationVariant interface { + GeoCentroidAggregationCaster() *GeoCentroidAggregation +} + +func (s *GeoCentroidAggregation) GeoCentroidAggregationCaster() *GeoCentroidAggregation { + return s +} diff --git a/typedapi/types/geodecayfunction.go b/typedapi/types/geodecayfunction.go index d864f090da..fbb728b010 100644 --- a/typedapi/types/geodecayfunction.go +++ b/typedapi/types/geodecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // GeoDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L210-L213 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L210-L213 type GeoDecayFunction struct { DecayFunctionBaseGeoLocationDistance map[string]DecayPlacementGeoLocationDistance `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s GeoDecayFunction) MarshalJSON() ([]byte, error) { // NewGeoDecayFunction returns a GeoDecayFunction. func NewGeoDecayFunction() *GeoDecayFunction { r := &GeoDecayFunction{ - DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance, 0), + DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance), } return r } + +// true + +type GeoDecayFunctionVariant interface { + GeoDecayFunctionCaster() *GeoDecayFunction +} + +func (s *GeoDecayFunction) GeoDecayFunctionCaster() *GeoDecayFunction { + return s +} diff --git a/typedapi/types/geodistanceaggregate.go b/typedapi/types/geodistanceaggregate.go index 0bf2c8ea7a..191beb5735 100644 --- a/typedapi/types/geodistanceaggregate.go +++ b/typedapi/types/geodistanceaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoDistanceAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L617-L622 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L617-L622 type GeoDistanceAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoDistanceAggregate() *GeoDistanceAggregate { return r } + +// false diff --git a/typedapi/types/geodistanceaggregation.go b/typedapi/types/geodistanceaggregation.go index 078b024589..d05c4c990d 100644 --- a/typedapi/types/geodistanceaggregation.go +++ b/typedapi/types/geodistanceaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // GeoDistanceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L396-L419 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L396-L419 type GeoDistanceAggregation struct { // DistanceType The distance calculation type. 
DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` @@ -138,3 +138,13 @@ func NewGeoDistanceAggregation() *GeoDistanceAggregation { return r } + +// true + +type GeoDistanceAggregationVariant interface { + GeoDistanceAggregationCaster() *GeoDistanceAggregation +} + +func (s *GeoDistanceAggregation) GeoDistanceAggregationCaster() *GeoDistanceAggregation { + return s +} diff --git a/typedapi/types/geodistancefeaturequery.go b/typedapi/types/geodistancefeaturequery.go index 9d70e62b37..520ee2f178 100644 --- a/typedapi/types/geodistancefeaturequery.go +++ b/typedapi/types/geodistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoDistanceFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L66-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L66-L69 type GeoDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -171,3 +171,13 @@ func NewGeoDistanceFeatureQuery() *GeoDistanceFeatureQuery { return r } + +// true + +type GeoDistanceFeatureQueryVariant interface { + GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery +} + +func (s *GeoDistanceFeatureQuery) GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/geodistancequery.go b/typedapi/types/geodistancequery.go index dfb374c859..f36e2dd628 100644 --- a/typedapi/types/geodistancequery.go +++ b/typedapi/types/geodistancequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // GeoDistanceQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L61-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L64-L96 type GeoDistanceQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -183,8 +183,18 @@ func (s GeoDistanceQuery) MarshalJSON() ([]byte, error) { // NewGeoDistanceQuery returns a GeoDistanceQuery. 
func NewGeoDistanceQuery() *GeoDistanceQuery { r := &GeoDistanceQuery{ - GeoDistanceQuery: make(map[string]GeoLocation, 0), + GeoDistanceQuery: make(map[string]GeoLocation), } return r } + +// true + +type GeoDistanceQueryVariant interface { + GeoDistanceQueryCaster() *GeoDistanceQuery +} + +func (s *GeoDistanceQuery) GeoDistanceQueryCaster() *GeoDistanceQuery { + return s +} diff --git a/typedapi/types/geodistancesort.go b/typedapi/types/geodistancesort.go index 305a983b77..1c48c32af6 100644 --- a/typedapi/types/geodistancesort.go +++ b/typedapi/types/geodistancesort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,7 +36,7 @@ import ( // GeoDistanceSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L58-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L58-L70 type GeoDistanceSort struct { DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` GeoDistanceSort map[string][]GeoLocation `json:"-"` @@ -161,8 +161,18 @@ func (s GeoDistanceSort) MarshalJSON() ([]byte, error) { // NewGeoDistanceSort returns a GeoDistanceSort. 
func NewGeoDistanceSort() *GeoDistanceSort { r := &GeoDistanceSort{ - GeoDistanceSort: make(map[string][]GeoLocation, 0), + GeoDistanceSort: make(map[string][]GeoLocation), } return r } + +// true + +type GeoDistanceSortVariant interface { + GeoDistanceSortCaster() *GeoDistanceSort +} + +func (s *GeoDistanceSort) GeoDistanceSortCaster() *GeoDistanceSort { + return s +} diff --git a/typedapi/types/geogridprocessor.go b/typedapi/types/geogridprocessor.go index 60b0ee418e..f54acd04a5 100644 --- a/typedapi/types/geogridprocessor.go +++ b/typedapi/types/geogridprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // GeoGridProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L384-L425 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L389-L430 type GeoGridProcessor struct { // ChildrenField If specified and children tiles exist, save those tile addresses to this // field as an array of strings. @@ -46,7 +46,7 @@ type GeoGridProcessor struct { // The field format is determined by the `tile_type`. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -118,16 +118,9 @@ func (s *GeoGridProcessor) UnmarshalJSON(data []byte) error { s.Field = o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -215,3 +208,13 @@ func NewGeoGridProcessor() *GeoGridProcessor { return r } + +// true + +type GeoGridProcessorVariant interface { + GeoGridProcessorCaster() *GeoGridProcessor +} + +func (s *GeoGridProcessor) GeoGridProcessorCaster() *GeoGridProcessor { + return s +} diff --git a/typedapi/types/geogridquery.go b/typedapi/types/geogridquery.go new file mode 100644 index 0000000000..35cca1d36b --- /dev/null +++ b/typedapi/types/geogridquery.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoGridQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L98-L103 +type GeoGridQuery struct { + AdditionalGeoGridQueryProperty map[string]json.RawMessage `json:"-"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + Geogrid *string `json:"geogrid,omitempty"` + Geohash *string `json:"geohash,omitempty"` + Geohex *string `json:"geohex,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoGridQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "geogrid": + if err := dec.Decode(&s.Geogrid); err != nil { + return fmt.Errorf("%s | %w", "Geogrid", err) + } + + case "geohash": + if err := dec.Decode(&s.Geohash); err != nil { + return fmt.Errorf("%s | %w", "Geohash", err) + } + + case "geohex": + if err := dec.Decode(&s.Geohex); err != nil { + return fmt.Errorf("%s | %w", "Geohex", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | 
%w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + if key, ok := t.(string); ok { + if s.AdditionalGeoGridQueryProperty == nil { + s.AdditionalGeoGridQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalGeoGridQueryProperty", err) + } + s.AdditionalGeoGridQueryProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoGridQuery) MarshalJSON() ([]byte, error) { + type opt GeoGridQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalGeoGridQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalGeoGridQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoGridQuery returns a GeoGridQuery. +func NewGeoGridQuery() *GeoGridQuery { + r := &GeoGridQuery{ + AdditionalGeoGridQueryProperty: make(map[string]json.RawMessage), + } + + return r +} + +// true + +type GeoGridQueryVariant interface { + GeoGridQueryCaster() *GeoGridQuery +} + +func (s *GeoGridQuery) GeoGridQueryCaster() *GeoGridQuery { + return s +} diff --git a/typedapi/types/geohashgridaggregate.go b/typedapi/types/geohashgridaggregate.go index 915de98c45..1fdb990336 100644 --- a/typedapi/types/geohashgridaggregate.go +++ b/typedapi/types/geohashgridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoHashGridAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L566-L568 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L566-L568 type GeoHashGridAggregate struct { Buckets BucketsGeoHashGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoHashGridAggregate() *GeoHashGridAggregate { return r } + +// false diff --git a/typedapi/types/geohashgridaggregation.go b/typedapi/types/geohashgridaggregation.go index b94a3c781b..ae4f717613 100644 --- a/typedapi/types/geohashgridaggregation.go +++ b/typedapi/types/geohashgridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoHashGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L421-L449 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L421-L449 type GeoHashGridAggregation struct { // Bounds The bounding box to filter the points in each bucket. 
Bounds GeoBounds `json:"bounds,omitempty"` @@ -181,3 +181,13 @@ func NewGeoHashGridAggregation() *GeoHashGridAggregation { return r } + +// true + +type GeoHashGridAggregationVariant interface { + GeoHashGridAggregationCaster() *GeoHashGridAggregation +} + +func (s *GeoHashGridAggregation) GeoHashGridAggregationCaster() *GeoHashGridAggregation { + return s +} diff --git a/typedapi/types/geohashgridbucket.go b/typedapi/types/geohashgridbucket.go index f6069acc06..553dad94d1 100644 --- a/typedapi/types/geohashgridbucket.go +++ b/typedapi/types/geohashgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // GeoHashGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L570-L572 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L570-L572 type GeoHashGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s GeoHashGridBucket) MarshalJSON() ([]byte, error) { // NewGeoHashGridBucket returns a GeoHashGridBucket. func NewGeoHashGridBucket() *GeoHashGridBucket { r := &GeoHashGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/geohashlocation.go b/typedapi/types/geohashlocation.go index 73b8a1e578..bfe36343fe 100644 --- a/typedapi/types/geohashlocation.go +++ b/typedapi/types/geohashlocation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoHashLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L131-L133 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L131-L133 type GeoHashLocation struct { Geohash string `json:"geohash"` } @@ -66,3 +66,13 @@ func NewGeoHashLocation() *GeoHashLocation { return r } + +// true + +type GeoHashLocationVariant interface { + GeoHashLocationCaster() *GeoHashLocation +} + +func (s *GeoHashLocation) GeoHashLocationCaster() *GeoHashLocation { + return s +} diff --git a/typedapi/types/geohashprecision.go b/typedapi/types/geohashprecision.go index e14ea23e9f..2b0b78fb1b 100644 --- a/typedapi/types/geohashprecision.go +++ b/typedapi/types/geohashprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L86-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L86-L90 type GeoHashPrecision any + +type GeoHashPrecisionVariant interface { + GeoHashPrecisionCaster() *GeoHashPrecision +} diff --git a/typedapi/types/geohexgridaggregate.go b/typedapi/types/geohexgridaggregate.go index 1e287eebcc..1a390c41d2 100644 --- a/typedapi/types/geohexgridaggregate.go +++ b/typedapi/types/geohexgridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoHexGridAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L585-L586 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L585-L586 type GeoHexGridAggregate struct { Buckets BucketsGeoHexGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoHexGridAggregate() *GeoHexGridAggregate { return r } + +// false diff --git a/typedapi/types/geohexgridaggregation.go b/typedapi/types/geohexgridaggregation.go index 0124073b64..06077232b7 100644 --- a/typedapi/types/geohexgridaggregation.go +++ b/typedapi/types/geohexgridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeohexGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L479-L504 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L479-L504 type GeohexGridAggregation struct { // Bounds Bounding box used to filter the geo-points in each bucket. 
Bounds GeoBounds `json:"bounds,omitempty"` @@ -189,3 +189,13 @@ func NewGeohexGridAggregation() *GeohexGridAggregation { return r } + +// true + +type GeohexGridAggregationVariant interface { + GeohexGridAggregationCaster() *GeohexGridAggregation +} + +func (s *GeohexGridAggregation) GeohexGridAggregationCaster() *GeohexGridAggregation { + return s +} diff --git a/typedapi/types/geohexgridbucket.go b/typedapi/types/geohexgridbucket.go index 9600bb1bbd..d0db9e126f 100644 --- a/typedapi/types/geohexgridbucket.go +++ b/typedapi/types/geohexgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // GeoHexGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L588-L590 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L588-L590 type GeoHexGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s GeoHexGridBucket) MarshalJSON() ([]byte, error) { // NewGeoHexGridBucket returns a GeoHexGridBucket. 
func NewGeoHexGridBucket() *GeoHexGridBucket { r := &GeoHexGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/databaseconfigurationmetadata.go b/typedapi/types/geoipdatabaseconfigurationmetadata.go similarity index 77% rename from typedapi/types/databaseconfigurationmetadata.go rename to typedapi/types/geoipdatabaseconfigurationmetadata.go index e869ace560..ff7fc0ef62 100644 --- a/typedapi/types/databaseconfigurationmetadata.go +++ b/typedapi/types/geoipdatabaseconfigurationmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,17 +29,17 @@ import ( "strconv" ) -// DatabaseConfigurationMetadata type. +// GeoipDatabaseConfigurationMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L29-L34 -type DatabaseConfigurationMetadata struct { +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L29-L34 +type GeoipDatabaseConfigurationMetadata struct { Database DatabaseConfiguration `json:"database"` Id string `json:"id"` ModifiedDateMillis int64 `json:"modified_date_millis"` Version int64 `json:"version"` } -func (s *DatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { +func (s *GeoipDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -89,9 +89,11 @@ func (s *DatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { return nil } -// NewDatabaseConfigurationMetadata returns a DatabaseConfigurationMetadata. -func NewDatabaseConfigurationMetadata() *DatabaseConfigurationMetadata { - r := &DatabaseConfigurationMetadata{} +// NewGeoipDatabaseConfigurationMetadata returns a GeoipDatabaseConfigurationMetadata. +func NewGeoipDatabaseConfigurationMetadata() *GeoipDatabaseConfigurationMetadata { + r := &GeoipDatabaseConfigurationMetadata{} return r } + +// false diff --git a/typedapi/types/geoipdownloadstatistics.go b/typedapi/types/geoipdownloadstatistics.go index 9865b36269..33aa428bef 100644 --- a/typedapi/types/geoipdownloadstatistics.go +++ b/typedapi/types/geoipdownloadstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoIpDownloadStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/geo_ip_stats/types.ts#L24-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/geo_ip_stats/types.ts#L24-L37 type GeoIpDownloadStatistics struct { // DatabasesCount Current number of databases available for use. DatabasesCount int `json:"databases_count"` @@ -158,3 +158,5 @@ func NewGeoIpDownloadStatistics() *GeoIpDownloadStatistics { return r } + +// false diff --git a/typedapi/types/geoipnodedatabasename.go b/typedapi/types/geoipnodedatabasename.go index 461ad57866..9d771da7f3 100644 --- a/typedapi/types/geoipnodedatabasename.go +++ b/typedapi/types/geoipnodedatabasename.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoIpNodeDatabaseName type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/geo_ip_stats/types.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/geo_ip_stats/types.ts#L47-L50 type GeoIpNodeDatabaseName struct { // Name Name of the database. Name string `json:"name"` @@ -67,3 +67,5 @@ func NewGeoIpNodeDatabaseName() *GeoIpNodeDatabaseName { return r } + +// false diff --git a/typedapi/types/geoipnodedatabases.go b/typedapi/types/geoipnodedatabases.go index fc76beb386..006d437de5 100644 --- a/typedapi/types/geoipnodedatabases.go +++ b/typedapi/types/geoipnodedatabases.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // GeoIpNodeDatabases type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/geo_ip_stats/types.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/geo_ip_stats/types.ts#L39-L45 type GeoIpNodeDatabases struct { // Databases Downloaded databases for the node. Databases []GeoIpNodeDatabaseName `json:"databases"` @@ -38,3 +38,5 @@ func NewGeoIpNodeDatabases() *GeoIpNodeDatabases { return r } + +// false diff --git a/typedapi/types/geoipprocessor.go b/typedapi/types/geoipprocessor.go index 7674dd53ee..4e05b3ee5d 100644 --- a/typedapi/types/geoipprocessor.go +++ b/typedapi/types/geoipprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoIpProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L438-L472 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L443-L477 type GeoIpProcessor struct { // DatabaseFile The database filename referring to a database the module ships with // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom @@ -51,7 +51,7 @@ type GeoIpProcessor struct { // field contains an array. FirstOnly *bool `json:"first_only,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -143,16 +143,9 @@ func (s *GeoIpProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -220,3 +213,13 @@ func NewGeoIpProcessor() *GeoIpProcessor { return r } + +// true + +type GeoIpProcessorVariant interface { + GeoIpProcessorCaster() *GeoIpProcessor +} + +func (s *GeoIpProcessor) GeoIpProcessorCaster() *GeoIpProcessor { + return s +} diff --git a/typedapi/types/geoline.go b/typedapi/types/geoline.go index 8c2f64c1f4..f43f35da21 100644 --- a/typedapi/types/geoline.go +++ b/typedapi/types/geoline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoLine type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L56-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L56-L62 type GeoLine struct { // Coordinates Array of `[lon, lat]` coordinates Coordinates [][]Float64 `json:"coordinates"` @@ -82,3 +82,5 @@ func NewGeoLine() *GeoLine { return r } + +// false diff --git a/typedapi/types/geolineaggregate.go b/typedapi/types/geolineaggregate.go index 691e692c4a..709d65542b 100644 --- a/typedapi/types/geolineaggregate.go +++ b/typedapi/types/geolineaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoLineAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L902-L912 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L902-L912 type GeoLineAggregate struct { Geometry GeoLine `json:"geometry"` Meta Metadata `json:"meta,omitempty"` @@ -92,3 +92,5 @@ func NewGeoLineAggregate() *GeoLineAggregate { return r } + +// false diff --git a/typedapi/types/geolineaggregation.go b/typedapi/types/geolineaggregation.go index 202fb8f362..0e1fe138a4 100644 --- a/typedapi/types/geolineaggregation.go +++ b/typedapi/types/geolineaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // GeoLineAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L124-L149 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L124-L149 type GeoLineAggregation struct { // IncludeSort When `true`, returns an additional array of the sort values in the feature // properties. 
@@ -123,3 +123,13 @@ func NewGeoLineAggregation() *GeoLineAggregation { return r } + +// true + +type GeoLineAggregationVariant interface { + GeoLineAggregationCaster() *GeoLineAggregation +} + +func (s *GeoLineAggregation) GeoLineAggregationCaster() *GeoLineAggregation { + return s +} diff --git a/typedapi/types/geolinepoint.go b/typedapi/types/geolinepoint.go index e0a9a36743..23b13b1ba9 100644 --- a/typedapi/types/geolinepoint.go +++ b/typedapi/types/geolinepoint.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoLinePoint type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L158-L163 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L158-L163 type GeoLinePoint struct { // Field The name of the geo_point field. Field string `json:"field"` @@ -67,3 +67,13 @@ func NewGeoLinePoint() *GeoLinePoint { return r } + +// true + +type GeoLinePointVariant interface { + GeoLinePointCaster() *GeoLinePoint +} + +func (s *GeoLinePoint) GeoLinePointCaster() *GeoLinePoint { + return s +} diff --git a/typedapi/types/geolinesort.go b/typedapi/types/geolinesort.go index a967e5953f..040d5eda99 100644 --- a/typedapi/types/geolinesort.go +++ b/typedapi/types/geolinesort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoLineSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L151-L156 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L151-L156 type GeoLineSort struct { // Field The name of the numeric field to use as the sort key for ordering the points. Field string `json:"field"` @@ -67,3 +67,13 @@ func NewGeoLineSort() *GeoLineSort { return r } + +// true + +type GeoLineSortVariant interface { + GeoLineSortCaster() *GeoLineSort +} + +func (s *GeoLineSort) GeoLineSortCaster() *GeoLineSort { + return s +} diff --git a/typedapi/types/geolocation.go b/typedapi/types/geolocation.go index 6f6745c0e2..ca7e87ac9e 100644 --- a/typedapi/types/geolocation.go +++ b/typedapi/types/geolocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,5 +27,9 @@ package types // []Float64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L104-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L104-L118 type GeoLocation any + +type GeoLocationVariant interface { + GeoLocationCaster() *GeoLocation +} diff --git a/typedapi/types/geopointproperty.go b/typedapi/types/geopointproperty.go index bab15f2fc1..859852937e 100644 --- a/typedapi/types/geopointproperty.go +++ b/typedapi/types/geopointproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // GeoPointProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L24-L32 type GeoPointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -45,13 +46,14 @@ type GeoPointProperty struct { IgnoreZValue *bool `json:"ignore_z_value,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue GeoLocation `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue GeoLocation `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { @@ -123,301 +125,313 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := 
NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": 
oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -558,301 +572,313 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := 
NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil 
{ + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -877,6 +903,11 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -891,21 +922,22 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { func (s GeoPointProperty) 
MarshalJSON() ([]byte, error) { type innerGeoPointProperty GeoPointProperty tmp := innerGeoPointProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - OnScriptError: s.OnScriptError, - Properties: s.Properties, - Script: s.Script, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "geo_point" @@ -916,10 +948,20 @@ func (s GeoPointProperty) MarshalJSON() ([]byte, error) { // NewGeoPointProperty returns a GeoPointProperty. func NewGeoPointProperty() *GeoPointProperty { r := &GeoPointProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type GeoPointPropertyVariant interface { + GeoPointPropertyCaster() *GeoPointProperty +} + +func (s *GeoPointProperty) GeoPointPropertyCaster() *GeoPointProperty { + return s +} diff --git a/typedapi/types/geopolygonpoints.go b/typedapi/types/geopolygonpoints.go index 66adec5106..938a11d685 100644 --- a/typedapi/types/geopolygonpoints.go +++ b/typedapi/types/geopolygonpoints.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // GeoPolygonPoints type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L95-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L105-L107 type GeoPolygonPoints struct { Points []GeoLocation `json:"points"` } @@ -33,3 +33,13 @@ func NewGeoPolygonPoints() *GeoPolygonPoints { return r } + +// true + +type GeoPolygonPointsVariant interface { + GeoPolygonPointsCaster() *GeoPolygonPoints +} + +func (s *GeoPolygonPoints) GeoPolygonPointsCaster() *GeoPolygonPoints { + return s +} diff --git a/typedapi/types/geopolygonquery.go b/typedapi/types/geopolygonquery.go index 966f9fdf8d..13948e31a1 100644 --- a/typedapi/types/geopolygonquery.go +++ b/typedapi/types/geopolygonquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // GeoPolygonQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L99-L111 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L109-L121 type GeoPolygonQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -159,8 +159,18 @@ func (s GeoPolygonQuery) MarshalJSON() ([]byte, error) { // NewGeoPolygonQuery returns a GeoPolygonQuery. func NewGeoPolygonQuery() *GeoPolygonQuery { r := &GeoPolygonQuery{ - GeoPolygonQuery: make(map[string]GeoPolygonPoints, 0), + GeoPolygonQuery: make(map[string]GeoPolygonPoints), } return r } + +// true + +type GeoPolygonQueryVariant interface { + GeoPolygonQueryCaster() *GeoPolygonQuery +} + +func (s *GeoPolygonQuery) GeoPolygonQueryCaster() *GeoPolygonQuery { + return s +} diff --git a/typedapi/types/georesults.go b/typedapi/types/georesults.go index b5ba727ebb..808dbaedfd 100644 --- a/typedapi/types/georesults.go +++ b/typedapi/types/georesults.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,12 @@ import ( // GeoResults type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Anomaly.ts#L145-L154 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Anomaly.ts#L146-L155 type GeoResults struct { // ActualPoint The actual value for the bucket formatted as a `geo_point`. - ActualPoint string `json:"actual_point"` + ActualPoint *string `json:"actual_point,omitempty"` // TypicalPoint The typical value for the bucket formatted as a `geo_point`. 
- TypicalPoint string `json:"typical_point"` + TypicalPoint *string `json:"typical_point,omitempty"` } func (s *GeoResults) UnmarshalJSON(data []byte) error { @@ -64,7 +64,7 @@ func (s *GeoResults) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.ActualPoint = o + s.ActualPoint = &o case "typical_point": var tmp json.RawMessage @@ -76,7 +76,7 @@ func (s *GeoResults) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.TypicalPoint = o + s.TypicalPoint = &o } } @@ -89,3 +89,5 @@ func NewGeoResults() *GeoResults { return r } + +// false diff --git a/typedapi/types/geoshapefieldquery.go b/typedapi/types/geoshapefieldquery.go index 099c909878..cbcb69f87d 100644 --- a/typedapi/types/geoshapefieldquery.go +++ b/typedapi/types/geoshapefieldquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // GeoShapeFieldQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L118-L129 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L128-L139 type GeoShapeFieldQuery struct { // IndexedShape Query using an indexed shape retrieved from the the specified document and // path. 
@@ -83,3 +83,13 @@ func NewGeoShapeFieldQuery() *GeoShapeFieldQuery { return r } + +// true + +type GeoShapeFieldQueryVariant interface { + GeoShapeFieldQueryCaster() *GeoShapeFieldQuery +} + +func (s *GeoShapeFieldQuery) GeoShapeFieldQueryCaster() *GeoShapeFieldQuery { + return s +} diff --git a/typedapi/types/geoshapeproperty.go b/typedapi/types/geoshapeproperty.go index a7d22f4a20..7527d5e70a 100644 --- a/typedapi/types/geoshapeproperty.go +++ b/typedapi/types/geoshapeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // GeoShapeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L41-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L41-L54 type GeoShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -46,12 +47,13 @@ type GeoShapeProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Strategy *geostrategy.GeoStrategy `json:"strategy,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Strategy *geostrategy.GeoStrategy `json:"strategy,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { @@ -137,301 +139,313 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := 
NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := 
NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -513,301 +527,313 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo 
:= NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -832,6 +858,11 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Strategy", err) } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -846,20 +877,21 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { type innerGeoShapeProperty GeoShapeProperty tmp := innerGeoShapeProperty{ - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Meta: s.Meta, - Orientation: s.Orientation, - Properties: s.Properties, - Store: s.Store, - Strategy: s.Strategy, - Type: s.Type, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + 
IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + Strategy: s.Strategy, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "geo_shape" @@ -870,10 +902,20 @@ func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { // NewGeoShapeProperty returns a GeoShapeProperty. func NewGeoShapeProperty() *GeoShapeProperty { r := &GeoShapeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type GeoShapePropertyVariant interface { + GeoShapePropertyCaster() *GeoShapeProperty +} + +func (s *GeoShapeProperty) GeoShapePropertyCaster() *GeoShapeProperty { + return s +} diff --git a/typedapi/types/geoshapequery.go b/typedapi/types/geoshapequery.go index f99160c69b..4c210008c9 100644 --- a/typedapi/types/geoshapequery.go +++ b/typedapi/types/geoshapequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoShapeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/geo.ts#L131-L147 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/geo.ts#L141-L157 type GeoShapeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -154,8 +154,18 @@ func (s GeoShapeQuery) MarshalJSON() ([]byte, error) { // NewGeoShapeQuery returns a GeoShapeQuery. func NewGeoShapeQuery() *GeoShapeQuery { r := &GeoShapeQuery{ - GeoShapeQuery: make(map[string]GeoShapeFieldQuery, 0), + GeoShapeQuery: make(map[string]GeoShapeFieldQuery), } return r } + +// true + +type GeoShapeQueryVariant interface { + GeoShapeQueryCaster() *GeoShapeQuery +} + +func (s *GeoShapeQuery) GeoShapeQueryCaster() *GeoShapeQuery { + return s +} diff --git a/typedapi/types/geotilegridaggregate.go b/typedapi/types/geotilegridaggregate.go index 9e2bd674f1..a466185b8c 100644 --- a/typedapi/types/geotilegridaggregate.go +++ b/typedapi/types/geotilegridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GeoTileGridAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L574-L579 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L574-L579 type GeoTileGridAggregate struct { Buckets BucketsGeoTileGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoTileGridAggregate() *GeoTileGridAggregate { return r } + +// false diff --git a/typedapi/types/geotilegridaggregation.go b/typedapi/types/geotilegridaggregation.go index c90f18d65b..af8c52f47b 100644 --- a/typedapi/types/geotilegridaggregation.go +++ b/typedapi/types/geotilegridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GeoTileGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L451-L477 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L451-L477 type GeoTileGridAggregation struct { // Bounds A bounding box to filter the geo-points or geo-shapes in each bucket. Bounds GeoBounds `json:"bounds,omitempty"` @@ -181,3 +181,13 @@ func NewGeoTileGridAggregation() *GeoTileGridAggregation { return r } + +// true + +type GeoTileGridAggregationVariant interface { + GeoTileGridAggregationCaster() *GeoTileGridAggregation +} + +func (s *GeoTileGridAggregation) GeoTileGridAggregationCaster() *GeoTileGridAggregation { + return s +} diff --git a/typedapi/types/geotilegridbucket.go b/typedapi/types/geotilegridbucket.go index 67008097cc..517b64ed37 100644 --- a/typedapi/types/geotilegridbucket.go +++ b/typedapi/types/geotilegridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // GeoTileGridBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L581-L583 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L581-L583 type GeoTileGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s GeoTileGridBucket) MarshalJSON() ([]byte, error) { // NewGeoTileGridBucket returns a GeoTileGridBucket. func NewGeoTileGridBucket() *GeoTileGridBucket { r := &GeoTileGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/germananalyzer.go b/typedapi/types/germananalyzer.go index c11bfc6381..418b6c7448 100644 --- a/typedapi/types/germananalyzer.go +++ b/typedapi/types/germananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GermanAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L175-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L186-L191 type GermanAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewGermanAnalyzer() *GermanAnalyzer { return r } + +// true + +type GermanAnalyzerVariant interface { + GermanAnalyzerCaster() *GermanAnalyzer +} + +func (s *GermanAnalyzer) GermanAnalyzerCaster() *GermanAnalyzer { + return s +} diff --git a/typedapi/types/getmigrationfeature.go b/typedapi/types/getmigrationfeature.go index 86616b782a..761dd1e03e 100644 --- a/typedapi/types/getmigrationfeature.go +++ b/typedapi/types/getmigrationfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // GetMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 type GetMigrationFeature struct { FeatureName string `json:"feature_name"` Indices []MigrationFeatureIndexInfo `json:"indices"` @@ -94,3 +94,5 @@ func NewGetMigrationFeature() *GetMigrationFeature { return r } + +// false diff --git a/typedapi/types/getresult.go b/typedapi/types/getresult.go index 044cb260ff..dd8f9d54c5 100644 --- a/typedapi/types/getresult.go +++ b/typedapi/types/getresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,18 +31,32 @@ import ( // GetResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get/types.ts#L25-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get/types.ts#L25-L67 type GetResult struct { - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Found bool `json:"found"` - Id_ string `json:"_id"` - Ignored_ []string `json:"_ignored,omitempty"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Routing_ *string `json:"_routing,omitempty"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Source_ json.RawMessage `json:"_source,omitempty"` - Version_ *int64 `json:"_version,omitempty"` + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Found Indicates whether the document exists. + Found bool `json:"found"` + // Id_ The unique identifier for the document. + Id_ string `json:"_id"` + Ignored_ []string `json:"_ignored,omitempty"` + // Index_ The name of the index the document belongs to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Routing_ The explicit routing, if set. + Routing_ *string `json:"_routing,omitempty"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Source_ If `found` is `true`, it contains the document data formatted in JSON. + // If the `_source` parameter is set to `false` or the `stored_fields` parameter + // is set to `true`, it is excluded. 
+ Source_ json.RawMessage `json:"_source,omitempty"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ *int64 `json:"_version,omitempty"` } func (s *GetResult) UnmarshalJSON(data []byte) error { @@ -147,8 +161,10 @@ func (s *GetResult) UnmarshalJSON(data []byte) error { // NewGetResult returns a GetResult. func NewGetResult() *GetResult { r := &GetResult{ - Fields: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/getscriptcontext.go b/typedapi/types/getscriptcontext.go index 7bee6dd677..23811ee78a 100644 --- a/typedapi/types/getscriptcontext.go +++ b/typedapi/types/getscriptcontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GetScriptContext type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_context/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_context/types.ts#L22-L25 type GetScriptContext struct { Methods []ContextMethod `json:"methods"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewGetScriptContext() *GetScriptContext { return r } + +// false diff --git a/typedapi/types/getstats.go b/typedapi/types/getstats.go index c3f88c88f3..8622dd468d 100644 --- a/typedapi/types/getstats.go +++ b/typedapi/types/getstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GetStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L130-L141 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L133-L144 type GetStats struct { Current int64 `json:"current"` ExistsTime Duration `json:"exists_time,omitempty"` @@ -161,3 +161,5 @@ func NewGetStats() *GetStats { return r } + +// false diff --git a/typedapi/types/getuserprofileerrors.go b/typedapi/types/getuserprofileerrors.go index 0d104b0999..74cd342fec 100644 --- a/typedapi/types/getuserprofileerrors.go +++ b/typedapi/types/getuserprofileerrors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GetUserProfileErrors type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_user_profile/types.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_user_profile/types.ts#L25-L28 type GetUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` @@ -83,8 +83,10 @@ func (s *GetUserProfileErrors) UnmarshalJSON(data []byte) error { // NewGetUserProfileErrors returns a GetUserProfileErrors. 
func NewGetUserProfileErrors() *GetUserProfileErrors { r := &GetUserProfileErrors{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/globalaggregate.go b/typedapi/types/globalaggregate.go index 59f1d5b0fe..6772b767f0 100644 --- a/typedapi/types/globalaggregate.go +++ b/typedapi/types/globalaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // GlobalAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L546-L550 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L546-L550 type GlobalAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s GlobalAggregate) MarshalJSON() ([]byte, error) { // NewGlobalAggregate returns a GlobalAggregate. func NewGlobalAggregate() *GlobalAggregate { r := &GlobalAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/globalaggregation.go b/typedapi/types/globalaggregation.go index 2bd5310947..8ece3266c8 100644 --- a/typedapi/types/globalaggregation.go +++ b/typedapi/types/globalaggregation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // GlobalAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L506-L506 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L506-L506 type GlobalAggregation struct { } @@ -32,3 +32,13 @@ func NewGlobalAggregation() *GlobalAggregation { return r } + +// true + +type GlobalAggregationVariant interface { + GlobalAggregationCaster() *GlobalAggregation +} + +func (s *GlobalAggregation) GlobalAggregationCaster() *GlobalAggregation { + return s +} diff --git a/typedapi/types/globalprivilege.go b/typedapi/types/globalprivilege.go index 86ef7b6623..081770c1fa 100644 --- a/typedapi/types/globalprivilege.go +++ b/typedapi/types/globalprivilege.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // GlobalPrivilege type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L368-L370 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L430-L432 type GlobalPrivilege struct { Application ApplicationGlobalUserPrivileges `json:"application"` } @@ -33,3 +33,13 @@ func NewGlobalPrivilege() *GlobalPrivilege { return r } + +// true + +type GlobalPrivilegeVariant interface { + GlobalPrivilegeCaster() *GlobalPrivilege +} + +func (s *GlobalPrivilege) GlobalPrivilegeCaster() *GlobalPrivilege { + return s +} diff --git a/typedapi/types/googlenormalizeddistanceheuristic.go b/typedapi/types/googlenormalizeddistanceheuristic.go index a942063c52..168a5cdd02 100644 --- a/typedapi/types/googlenormalizeddistanceheuristic.go +++ b/typedapi/types/googlenormalizeddistanceheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GoogleNormalizedDistanceHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L793-L798 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L793-L798 type GoogleNormalizedDistanceHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. 
@@ -78,3 +78,13 @@ func NewGoogleNormalizedDistanceHeuristic() *GoogleNormalizedDistanceHeuristic { return r } + +// true + +type GoogleNormalizedDistanceHeuristicVariant interface { + GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic +} + +func (s *GoogleNormalizedDistanceHeuristic) GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic { + return s +} diff --git a/typedapi/types/grantapikey.go b/typedapi/types/grantapikey.go index c6044ed1ce..6bd7a53091 100644 --- a/typedapi/types/grantapikey.go +++ b/typedapi/types/grantapikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // GrantApiKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/grant_api_key/types.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/grant_api_key/types.ts#L25-L45 type GrantApiKey struct { // Expiration Expiration time for the API key. By default, API keys never expire. Expiration *string `json:"expiration,omitempty"` @@ -41,7 +41,6 @@ type GrantApiKey struct { Metadata Metadata `json:"metadata,omitempty"` Name string `json:"name"` // RoleDescriptors The role descriptors for this API key. - // This parameter is optional. // When it is not specified or is an empty array, the API key has a point in // time snapshot of permissions of the specified user or access token. 
// If you supply role descriptors, the resultant permissions are an intersection @@ -111,3 +110,13 @@ func NewGrantApiKey() *GrantApiKey { return r } + +// true + +type GrantApiKeyVariant interface { + GrantApiKeyCaster() *GrantApiKey +} + +func (s *GrantApiKey) GrantApiKeyCaster() *GrantApiKey { + return s +} diff --git a/typedapi/types/greaterthanvalidation.go b/typedapi/types/greaterthanvalidation.go index 7e7bed2282..bf306f7d89 100644 --- a/typedapi/types/greaterthanvalidation.go +++ b/typedapi/types/greaterthanvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GreaterThanValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L63-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L63-L66 type GreaterThanValidation struct { Constraint Float64 `json:"constraint"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewGreaterThanValidation() *GreaterThanValidation { return r } + +// true + +type GreaterThanValidationVariant interface { + GreaterThanValidationCaster() *GreaterThanValidation +} + +func (s *GreaterThanValidation) GreaterThanValidationCaster() *GreaterThanValidation { + return s +} diff --git a/typedapi/types/greekanalyzer.go b/typedapi/types/greekanalyzer.go index 8628c633cf..a3d15cfcce 100644 --- a/typedapi/types/greekanalyzer.go +++ b/typedapi/types/greekanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GreekAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L182-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L193-L197 type GreekAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewGreekAnalyzer() *GreekAnalyzer { return r } + +// true + +type GreekAnalyzerVariant interface { + GreekAnalyzerCaster() *GreekAnalyzer +} + +func (s *GreekAnalyzer) GreekAnalyzerCaster() *GreekAnalyzer { + return s +} diff --git a/typedapi/types/grokprocessor.go b/typedapi/types/grokprocessor.go index e5fe00543a..17cc44ccef 100644 --- a/typedapi/types/grokprocessor.go +++ b/typedapi/types/grokprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GrokProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L910-L941 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L951-L982 type GrokProcessor struct { // Description Description of the processor. 
// Useful for describing the purpose of the processor or its configuration. @@ -42,7 +42,7 @@ type GrokProcessor struct { // Field The field to use for grok expression parsing. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -111,16 +111,9 @@ func (s *GrokProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -202,8 +195,18 @@ func (s *GrokProcessor) UnmarshalJSON(data []byte) error { // NewGrokProcessor returns a GrokProcessor. func NewGrokProcessor() *GrokProcessor { r := &GrokProcessor{ - PatternDefinitions: make(map[string]string, 0), + PatternDefinitions: make(map[string]string), } return r } + +// true + +type GrokProcessorVariant interface { + GrokProcessorCaster() *GrokProcessor +} + +func (s *GrokProcessor) GrokProcessorCaster() *GrokProcessor { + return s +} diff --git a/typedapi/types/groupings.go b/typedapi/types/groupings.go index 9ee1d942d8..c33ba7b23c 100644 --- a/typedapi/types/groupings.go +++ b/typedapi/types/groupings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Groupings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Groupings.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Groupings.ts#L24-L40 type Groupings struct { // DateHistogram A date histogram group aggregates a date field into time-based buckets. // This group is mandatory; you currently cannot roll up documents without a @@ -45,3 +45,13 @@ func NewGroupings() *Groupings { return r } + +// true + +type GroupingsVariant interface { + GroupingsCaster() *Groupings +} + +func (s *Groupings) GroupingsCaster() *Groupings { + return s +} diff --git a/typedapi/types/gsubprocessor.go b/typedapi/types/gsubprocessor.go index 4368041764..6f51bb40ab 100644 --- a/typedapi/types/gsubprocessor.go +++ b/typedapi/types/gsubprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // GsubProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L943-L967 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L984-L1008 type GsubProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type GsubProcessor struct { // Field The field to apply the replacement to. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -92,16 +92,9 @@ func (s *GsubProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -188,3 +181,13 @@ func NewGsubProcessor() *GsubProcessor { return r } + +// true + +type GsubProcessorVariant interface { + GsubProcessorCaster() *GsubProcessor +} + +func (s *GsubProcessor) GsubProcessorCaster() *GsubProcessor { + return s +} diff --git a/typedapi/types/halffloatnumberproperty.go b/typedapi/types/halffloatnumberproperty.go index 95f865ac15..4bf8b3d94d 100644 --- a/typedapi/types/halffloatnumberproperty.go +++ b/typedapi/types/halffloatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // HalfFloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L147-L150 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L151-L154 type HalfFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type HalfFloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *float32 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *float32 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -553,301 +567,313 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -872,6 +898,11 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -920,6 +951,7 @@ func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -933,10 +965,20 @@ func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { // NewHalfFloatNumberProperty returns a HalfFloatNumberProperty. func NewHalfFloatNumberProperty() *HalfFloatNumberProperty { r := &HalfFloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type HalfFloatNumberPropertyVariant interface { + HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty +} + +func (s *HalfFloatNumberProperty) HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty { + return s +} diff --git a/typedapi/types/haschildquery.go b/typedapi/types/haschildquery.go index d39d060dfb..d64136614c 100644 --- a/typedapi/types/haschildquery.go +++ b/typedapi/types/haschildquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // HasChildQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/joining.ts#L41-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/joining.ts#L41-L79 type HasChildQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -59,7 +59,7 @@ type HasChildQuery struct { // Query Query you wish to run on child documents of the `type` field. // If a child document matches the search, the query returns the parent // document. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // ScoreMode Indicates how scores for matching child documents affect the root parent // document’s relevance score. @@ -188,3 +188,13 @@ func NewHasChildQuery() *HasChildQuery { return r } + +// true + +type HasChildQueryVariant interface { + HasChildQueryCaster() *HasChildQuery +} + +func (s *HasChildQuery) HasChildQueryCaster() *HasChildQuery { + return s +} diff --git a/typedapi/types/hasparentquery.go b/typedapi/types/hasparentquery.go index 9914c88260..e650d019b4 100644 --- a/typedapi/types/hasparentquery.go +++ b/typedapi/types/hasparentquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HasParentQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/joining.ts#L81-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/joining.ts#L81-L110 type HasParentQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -51,7 +51,7 @@ type HasParentQuery struct { // Query Query you wish to run on parent documents of the `parent_type` field. // If a parent document matches the search, the query returns its child // documents. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // Score Indicates whether the relevance score of a matching parent document is // aggregated into its child documents. @@ -155,3 +155,13 @@ func NewHasParentQuery() *HasParentQuery { return r } + +// true + +type HasParentQueryVariant interface { + HasParentQueryCaster() *HasParentQuery +} + +func (s *HasParentQuery) HasParentQueryCaster() *HasParentQuery { + return s +} diff --git a/typedapi/types/hasprivilegesuserprofileerrors.go b/typedapi/types/hasprivilegesuserprofileerrors.go index 3da99334ef..c25bf1c17d 100644 --- a/typedapi/types/hasprivilegesuserprofileerrors.go +++ b/typedapi/types/hasprivilegesuserprofileerrors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HasPrivilegesUserProfileErrors type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges_user_profile/types.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges_user_profile/types.ts#L39-L42 type HasPrivilegesUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` @@ -83,8 +83,10 @@ func (s *HasPrivilegesUserProfileErrors) UnmarshalJSON(data []byte) error { // NewHasPrivilegesUserProfileErrors returns a HasPrivilegesUserProfileErrors. func NewHasPrivilegesUserProfileErrors() *HasPrivilegesUserProfileErrors { r := &HasPrivilegesUserProfileErrors{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/hdrmethod.go b/typedapi/types/hdrmethod.go index 76ab447c9d..467073f834 100644 --- a/typedapi/types/hdrmethod.go +++ b/typedapi/types/hdrmethod.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HdrMethod type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L225-L230 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L225-L230 type HdrMethod struct { // NumberOfSignificantValueDigits Specifies the resolution of values for the histogram in number of significant // digits. 
@@ -80,3 +80,13 @@ func NewHdrMethod() *HdrMethod { return r } + +// true + +type HdrMethodVariant interface { + HdrMethodCaster() *HdrMethod +} + +func (s *HdrMethod) HdrMethodCaster() *HdrMethod { + return s +} diff --git a/typedapi/types/hdrpercentileranksaggregate.go b/typedapi/types/hdrpercentileranksaggregate.go index a3cf31b9eb..c8705911ed 100644 --- a/typedapi/types/hdrpercentileranksaggregate.go +++ b/typedapi/types/hdrpercentileranksaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HdrPercentileRanksAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L171-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L171-L172 type HdrPercentileRanksAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *HdrPercentileRanksAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewHdrPercentileRanksAggregate() *HdrPercentileRanksAggregate { return r } + +// false diff --git a/typedapi/types/hdrpercentilesaggregate.go b/typedapi/types/hdrpercentilesaggregate.go index ce6ba99871..dce0c57247 100644 --- a/typedapi/types/hdrpercentilesaggregate.go +++ b/typedapi/types/hdrpercentilesaggregate.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HdrPercentilesAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L168-L169 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L168-L169 type HdrPercentilesAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *HdrPercentilesAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewHdrPercentilesAggregate() *HdrPercentilesAggregate { return r } + +// false diff --git a/typedapi/types/healthrecord.go b/typedapi/types/healthrecord.go index b5d78ee0a0..df3152f798 100644 --- a/typedapi/types/healthrecord.go +++ b/typedapi/types/healthrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HealthRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/health/types.ts#L23-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/health/types.ts#L23-L99 type HealthRecord struct { // ActiveShardsPercent active number of shards in percent ActiveShardsPercent *string `json:"active_shards_percent,omitempty"` @@ -257,3 +257,5 @@ func NewHealthRecord() *HealthRecord { return r } + +// false diff --git a/typedapi/types/healthresponsebody.go b/typedapi/types/healthresponsebody.go deleted file mode 100644 index fbf45f1efe..0000000000 --- a/typedapi/types/healthresponsebody.go +++ /dev/null @@ -1,327 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" -) - -// HealthResponseBody type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/health/ClusterHealthResponse.ts#L39-L74 -type HealthResponseBody struct { - // ActivePrimaryShards The number of active primary shards. - ActivePrimaryShards int `json:"active_primary_shards"` - // ActiveShards The total number of active primary and replica shards. - ActiveShards int `json:"active_shards"` - // ActiveShardsPercentAsNumber The ratio of active shards in the cluster expressed as a percentage. - ActiveShardsPercentAsNumber Percentage `json:"active_shards_percent_as_number"` - // ClusterName The name of the cluster. - ClusterName string `json:"cluster_name"` - // DelayedUnassignedShards The number of shards whose allocation has been delayed by the timeout - // settings. - DelayedUnassignedShards int `json:"delayed_unassigned_shards"` - Indices map[string]IndexHealthStats `json:"indices,omitempty"` - // InitializingShards The number of shards that are under initialization. - InitializingShards int `json:"initializing_shards"` - // NumberOfDataNodes The number of nodes that are dedicated data nodes. - NumberOfDataNodes int `json:"number_of_data_nodes"` - // NumberOfInFlightFetch The number of unfinished fetches. - NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` - // NumberOfNodes The number of nodes within the cluster. - NumberOfNodes int `json:"number_of_nodes"` - // NumberOfPendingTasks The number of cluster-level changes that have not yet been executed. - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - // RelocatingShards The number of shards that are under relocation. - RelocatingShards int `json:"relocating_shards"` - Status healthstatus.HealthStatus `json:"status"` - // TaskMaxWaitingInQueue The time since the earliest initiated task is waiting for being performed. 
- TaskMaxWaitingInQueue Duration `json:"task_max_waiting_in_queue,omitempty"` - // TaskMaxWaitingInQueueMillis The time expressed in milliseconds since the earliest initiated task is - // waiting for being performed. - TaskMaxWaitingInQueueMillis int64 `json:"task_max_waiting_in_queue_millis"` - // TimedOut If false the response returned within the period of time that is specified by - // the timeout parameter (30s by default) - TimedOut bool `json:"timed_out"` - // UnassignedPrimaryShards The number of primary shards that are not allocated. - UnassignedPrimaryShards int `json:"unassigned_primary_shards"` - // UnassignedShards The number of shards that are not allocated. - UnassignedShards int `json:"unassigned_shards"` -} - -func (s *HealthResponseBody) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "active_primary_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ActivePrimaryShards", err) - } - s.ActivePrimaryShards = value - case float64: - f := int(v) - s.ActivePrimaryShards = f - } - - case "active_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ActiveShards", err) - } - s.ActiveShards = value - case float64: - f := int(v) - s.ActiveShards = f - } - - case "active_shards_percent_as_number": - if err := dec.Decode(&s.ActiveShardsPercentAsNumber); err != nil { - return fmt.Errorf("%s | %w", "ActiveShardsPercentAsNumber", err) - } - - case "cluster_name": - if err := dec.Decode(&s.ClusterName); err != nil { - return fmt.Errorf("%s | %w", "ClusterName", err) - } - - case "delayed_unassigned_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { 
- case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "DelayedUnassignedShards", err) - } - s.DelayedUnassignedShards = value - case float64: - f := int(v) - s.DelayedUnassignedShards = f - } - - case "indices": - if s.Indices == nil { - s.Indices = make(map[string]IndexHealthStats, 0) - } - if err := dec.Decode(&s.Indices); err != nil { - return fmt.Errorf("%s | %w", "Indices", err) - } - - case "initializing_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "InitializingShards", err) - } - s.InitializingShards = value - case float64: - f := int(v) - s.InitializingShards = f - } - - case "number_of_data_nodes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfDataNodes", err) - } - s.NumberOfDataNodes = value - case float64: - f := int(v) - s.NumberOfDataNodes = f - } - - case "number_of_in_flight_fetch": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfInFlightFetch", err) - } - s.NumberOfInFlightFetch = value - case float64: - f := int(v) - s.NumberOfInFlightFetch = f - } - - case "number_of_nodes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfNodes", err) - } - s.NumberOfNodes = value - case float64: - f := int(v) - s.NumberOfNodes = f - } - - case "number_of_pending_tasks": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfPendingTasks", err) - } - s.NumberOfPendingTasks = value - case float64: - f := int(v) - s.NumberOfPendingTasks = f - 
} - - case "relocating_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "RelocatingShards", err) - } - s.RelocatingShards = value - case float64: - f := int(v) - s.RelocatingShards = f - } - - case "status": - if err := dec.Decode(&s.Status); err != nil { - return fmt.Errorf("%s | %w", "Status", err) - } - - case "task_max_waiting_in_queue": - if err := dec.Decode(&s.TaskMaxWaitingInQueue); err != nil { - return fmt.Errorf("%s | %w", "TaskMaxWaitingInQueue", err) - } - - case "task_max_waiting_in_queue_millis": - if err := dec.Decode(&s.TaskMaxWaitingInQueueMillis); err != nil { - return fmt.Errorf("%s | %w", "TaskMaxWaitingInQueueMillis", err) - } - - case "timed_out": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TimedOut", err) - } - s.TimedOut = value - case bool: - s.TimedOut = v - } - - case "unassigned_primary_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "UnassignedPrimaryShards", err) - } - s.UnassignedPrimaryShards = value - case float64: - f := int(v) - s.UnassignedPrimaryShards = f - } - - case "unassigned_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "UnassignedShards", err) - } - s.UnassignedShards = value - case float64: - f := int(v) - s.UnassignedShards = f - } - - } - } - return nil -} - -// NewHealthResponseBody returns a HealthResponseBody. 
-func NewHealthResponseBody() *HealthResponseBody { - r := &HealthResponseBody{ - Indices: make(map[string]IndexHealthStats, 0), - } - - return r -} diff --git a/typedapi/types/healthstatistics.go b/typedapi/types/healthstatistics.go index 1fa68fb763..079a5ff4a2 100644 --- a/typedapi/types/healthstatistics.go +++ b/typedapi/types/healthstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HealthStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L151-L153 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L150-L152 type HealthStatistics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewHealthStatistics() *HealthStatistics { return r } + +// false diff --git a/typedapi/types/highlight.go b/typedapi/types/highlight.go index 7c286360ce..ff7d2865a2 100644 --- a/typedapi/types/highlight.go +++ b/typedapi/types/highlight.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -38,7 +38,7 @@ import ( // Highlight type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L152-L155 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L152-L155 type Highlight struct { // BoundaryChars A string that contains each boundary character. BoundaryChars *string `json:"boundary_chars,omitempty"` @@ -378,9 +378,19 @@ func (s *Highlight) UnmarshalJSON(data []byte) error { // NewHighlight returns a Highlight. func NewHighlight() *Highlight { r := &Highlight{ - Fields: make(map[string]HighlightField, 0), - Options: make(map[string]json.RawMessage, 0), + Fields: make(map[string]HighlightField), + Options: make(map[string]json.RawMessage), } return r } + +// true + +type HighlightVariant interface { + HighlightCaster() *Highlight +} + +func (s *Highlight) HighlightCaster() *Highlight { + return s +} diff --git a/typedapi/types/highlightfield.go b/typedapi/types/highlightfield.go index 8b6e067a69..6487a75beb 100644 --- a/typedapi/types/highlightfield.go +++ b/typedapi/types/highlightfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -37,7 +37,7 @@ import ( // HighlightField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/highlighting.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/highlighting.ts#L192-L195 type HighlightField struct { // BoundaryChars A string that contains each boundary character. 
BoundaryChars *string `json:"boundary_chars,omitempty"` @@ -396,8 +396,18 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { // NewHighlightField returns a HighlightField. func NewHighlightField() *HighlightField { r := &HighlightField{ - Options: make(map[string]json.RawMessage, 0), + Options: make(map[string]json.RawMessage), } return r } + +// true + +type HighlightFieldVariant interface { + HighlightFieldCaster() *HighlightField +} + +func (s *HighlightField) HighlightFieldCaster() *HighlightField { + return s +} diff --git a/typedapi/types/hindianalyzer.go b/typedapi/types/hindianalyzer.go index 386a45510b..a2283909b4 100644 --- a/typedapi/types/hindianalyzer.go +++ b/typedapi/types/hindianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HindiAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L188-L193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L199-L204 type HindiAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewHindiAnalyzer() *HindiAnalyzer { return r } + +// true + +type HindiAnalyzerVariant interface { + HindiAnalyzerCaster() *HindiAnalyzer +} + +func (s *HindiAnalyzer) HindiAnalyzerCaster() *HindiAnalyzer { + return s +} diff --git a/typedapi/types/hint.go b/typedapi/types/hint.go index 894c1e8600..4439c8641d 100644 --- a/typedapi/types/hint.go +++ b/typedapi/types/hint.go @@ -16,27 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Hint type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/suggest_user_profiles/types.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/suggest_user_profiles/types.ts#L23-L34 type Hint struct { // Labels A single key-value pair to match against the labels section // of a profile. A profile is considered matching if it matches // at least one of the strings. Labels map[string][]string `json:"labels,omitempty"` - // Uids A list of Profile UIDs to match against. + // Uids A list of profile UIDs to match against. Uids []string `json:"uids,omitempty"` } // NewHint returns a Hint. 
func NewHint() *Hint { r := &Hint{ - Labels: make(map[string][]string, 0), + Labels: make(map[string][]string), } return r } + +// true + +type HintVariant interface { + HintCaster() *Hint +} + +func (s *Hint) HintCaster() *Hint { + return s +} diff --git a/typedapi/types/histogramaggregate.go b/typedapi/types/histogramaggregate.go index 7cc74925c2..437ea44089 100644 --- a/typedapi/types/histogramaggregate.go +++ b/typedapi/types/histogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L372-L376 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L372-L376 type HistogramAggregate struct { Buckets BucketsHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewHistogramAggregate() *HistogramAggregate { return r } + +// false diff --git a/typedapi/types/histogramaggregation.go b/typedapi/types/histogramaggregation.go index 6f409e95d9..ee9c84e9a9 100644 --- a/typedapi/types/histogramaggregation.go +++ b/typedapi/types/histogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // HistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L519-L565 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L519-L565 type HistogramAggregation struct { // ExtendedBounds Enables extending the bounds of the histogram beyond the data itself. ExtendedBounds *ExtendedBoundsdouble `json:"extended_bounds,omitempty"` @@ -223,3 +223,13 @@ func NewHistogramAggregation() *HistogramAggregation { return r } + +// true + +type HistogramAggregationVariant interface { + HistogramAggregationCaster() *HistogramAggregation +} + +func (s *HistogramAggregation) HistogramAggregationCaster() *HistogramAggregation { + return s +} diff --git a/typedapi/types/histogrambucket.go b/typedapi/types/histogrambucket.go index 083565ae5b..3c38c8419b 100644 --- a/typedapi/types/histogrambucket.go +++ b/typedapi/types/histogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // HistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L378-L381 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L378-L381 type HistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -655,8 +655,10 @@ func (s HistogramBucket) MarshalJSON() ([]byte, error) { // NewHistogramBucket returns a HistogramBucket. 
func NewHistogramBucket() *HistogramBucket { r := &HistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/histogramgrouping.go b/typedapi/types/histogramgrouping.go index 6f3ae07dd6..c6958be7d2 100644 --- a/typedapi/types/histogramgrouping.go +++ b/typedapi/types/histogramgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HistogramGrouping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Groupings.ts#L84-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Groupings.ts#L84-L97 type HistogramGrouping struct { // Fields The set of fields that you wish to build histograms for. // All fields specified must be some kind of numeric. @@ -102,3 +102,13 @@ func NewHistogramGrouping() *HistogramGrouping { return r } + +// true + +type HistogramGroupingVariant interface { + HistogramGroupingCaster() *HistogramGrouping +} + +func (s *HistogramGrouping) HistogramGroupingCaster() *HistogramGrouping { + return s +} diff --git a/typedapi/types/histogramproperty.go b/typedapi/types/histogramproperty.go index 3872dc375f..177f8e6be9 100644 --- a/typedapi/types/histogramproperty.go +++ b/typedapi/types/histogramproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // HistogramProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L60-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L69-L72 type HistogramProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *HistogramProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -755,13 +786,14 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { func (s HistogramProperty) MarshalJSON() ([]byte, error) { type innerHistogramProperty HistogramProperty tmp := innerHistogramProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "histogram" @@ -772,10 +804,20 @@ func (s HistogramProperty) MarshalJSON() ([]byte, error) { // NewHistogramProperty returns a HistogramProperty. 
func NewHistogramProperty() *HistogramProperty { r := &HistogramProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type HistogramPropertyVariant interface { + HistogramPropertyCaster() *HistogramProperty +} + +func (s *HistogramProperty) HistogramPropertyCaster() *HistogramProperty { + return s +} diff --git a/typedapi/types/hit.go b/typedapi/types/hit.go index 74317f05bb..0f1ea301de 100644 --- a/typedapi/types/hit.go +++ b/typedapi/types/hit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,28 +31,28 @@ import ( // Hit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L41-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L40-L65 type Hit struct { - Explanation_ *Explanation `json:"_explanation,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Highlight map[string][]string `json:"highlight,omitempty"` - Id_ *string `json:"_id,omitempty"` - IgnoredFieldValues map[string][]FieldValue `json:"ignored_field_values,omitempty"` - Ignored_ []string `json:"_ignored,omitempty"` - Index_ string `json:"_index"` - InnerHits map[string]InnerHitsResult `json:"inner_hits,omitempty"` - MatchedQueries any `json:"matched_queries,omitempty"` - Nested_ *NestedIdentity `json:"_nested,omitempty"` - Node_ *string `json:"_node,omitempty"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Rank_ *int `json:"_rank,omitempty"` - Routing_ *string `json:"_routing,omitempty"` - Score_ *Float64 `json:"_score,omitempty"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shard_ *string `json:"_shard,omitempty"` - Sort []FieldValue `json:"sort,omitempty"` - Source_ json.RawMessage `json:"_source,omitempty"` - Version_ *int64 `json:"_version,omitempty"` + Explanation_ *Explanation `json:"_explanation,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Highlight map[string][]string `json:"highlight,omitempty"` + Id_ *string `json:"_id,omitempty"` + IgnoredFieldValues map[string][]json.RawMessage `json:"ignored_field_values,omitempty"` + Ignored_ []string `json:"_ignored,omitempty"` + Index_ string `json:"_index"` + InnerHits map[string]InnerHitsResult `json:"inner_hits,omitempty"` + MatchedQueries any `json:"matched_queries,omitempty"` + Nested_ *NestedIdentity `json:"_nested,omitempty"` + Node_ *string `json:"_node,omitempty"` + PrimaryTerm_ *int64 
`json:"_primary_term,omitempty"` + Rank_ *int `json:"_rank,omitempty"` + Routing_ *string `json:"_routing,omitempty"` + Score_ *Float64 `json:"_score,omitempty"` + SeqNo_ *int64 `json:"_seq_no,omitempty"` + Shard_ *string `json:"_shard,omitempty"` + Sort []FieldValue `json:"sort,omitempty"` + Source_ json.RawMessage `json:"_source,omitempty"` + Version_ *int64 `json:"_version,omitempty"` } func (s *Hit) UnmarshalJSON(data []byte) error { @@ -98,7 +98,7 @@ func (s *Hit) UnmarshalJSON(data []byte) error { case "ignored_field_values": if s.IgnoredFieldValues == nil { - s.IgnoredFieldValues = make(map[string][]FieldValue, 0) + s.IgnoredFieldValues = make(map[string][]json.RawMessage, 0) } if err := dec.Decode(&s.IgnoredFieldValues); err != nil { return fmt.Errorf("%s | %w", "IgnoredFieldValues", err) @@ -248,11 +248,13 @@ func (s *Hit) UnmarshalJSON(data []byte) error { // NewHit returns a Hit. func NewHit() *Hit { r := &Hit{ - Fields: make(map[string]json.RawMessage, 0), - Highlight: make(map[string][]string, 0), - IgnoredFieldValues: make(map[string][]FieldValue, 0), - InnerHits: make(map[string]InnerHitsResult, 0), + Fields: make(map[string]json.RawMessage), + Highlight: make(map[string][]string), + IgnoredFieldValues: make(map[string][]json.RawMessage), + InnerHits: make(map[string]InnerHitsResult), } return r } + +// false diff --git a/typedapi/types/hitsevent.go b/typedapi/types/hitsevent.go index de48ca014d..35035b821b 100644 --- a/typedapi/types/hitsevent.go +++ b/typedapi/types/hitsevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HitsEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/_types/EqlHits.ts#L41-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/_types/EqlHits.ts#L41-L54 type HitsEvent struct { Fields map[string][]json.RawMessage `json:"fields,omitempty"` // Id_ Unique identifier for the event. This ID is only unique within the index. @@ -105,8 +105,10 @@ func (s *HitsEvent) UnmarshalJSON(data []byte) error { // NewHitsEvent returns a HitsEvent. func NewHitsEvent() *HitsEvent { r := &HitsEvent{ - Fields: make(map[string][]json.RawMessage, 0), + Fields: make(map[string][]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/hitsmetadata.go b/typedapi/types/hitsmetadata.go index 1bbae1153e..d6f7c0a6c5 100644 --- a/typedapi/types/hitsmetadata.go +++ b/typedapi/types/hitsmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HitsMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L67-L73 type HitsMetadata struct { Hits []Hit `json:"hits"` MaxScore *Float64 `json:"max_score,omitempty"` @@ -80,3 +80,5 @@ func NewHitsMetadata() *HitsMetadata { return r } + +// false diff --git a/typedapi/types/hitssequence.go b/typedapi/types/hitssequence.go index 350401bdf5..572376c443 100644 --- a/typedapi/types/hitssequence.go +++ b/typedapi/types/hitssequence.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // HitsSequence type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/eql/_types/EqlHits.ts#L56-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/eql/_types/EqlHits.ts#L56-L64 type HitsSequence struct { // Events Contains events matching the query. Each object represents a matching event. Events []HitsEvent `json:"events"` @@ -41,3 +41,5 @@ func NewHitsSequence() *HitsSequence { return r } + +// false diff --git a/typedapi/types/holtlinearmodelsettings.go b/typedapi/types/holtlinearmodelsettings.go index cc80b0c0ff..8c4d8f1525 100644 --- a/typedapi/types/holtlinearmodelsettings.go +++ b/typedapi/types/holtlinearmodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HoltLinearModelSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L297-L300 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L297-L300 type HoltLinearModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` @@ -95,3 +95,13 @@ func NewHoltLinearModelSettings() *HoltLinearModelSettings { return r } + +// true + +type HoltLinearModelSettingsVariant interface { + HoltLinearModelSettingsCaster() *HoltLinearModelSettings +} + +func (s *HoltLinearModelSettings) HoltLinearModelSettingsCaster() *HoltLinearModelSettings { + return s +} diff --git a/typedapi/types/holtmovingaverageaggregation.go b/typedapi/types/holtmovingaverageaggregation.go index 8990e179d9..45f37ca803 100644 --- a/typedapi/types/holtmovingaverageaggregation.go +++ b/typedapi/types/holtmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // HoltMovingAverageAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L283-L286 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L283-L286 type HoltMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewHoltMovingAverageAggregation() *HoltMovingAverageAggregation { return r } + +// true + +type HoltMovingAverageAggregationVariant interface { + HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation +} + +func (s *HoltMovingAverageAggregation) HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation { + return s +} diff --git a/typedapi/types/holtwintersmodelsettings.go b/typedapi/types/holtwintersmodelsettings.go index 7088e98990..4e2b79a9ff 100644 --- a/typedapi/types/holtwintersmodelsettings.go +++ b/typedapi/types/holtwintersmodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // HoltWintersModelSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L301-L308 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L301-L308 type HoltWintersModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` @@ -152,3 +152,13 @@ func NewHoltWintersModelSettings() *HoltWintersModelSettings { return r } + +// true + +type HoltWintersModelSettingsVariant interface { + HoltWintersModelSettingsCaster() *HoltWintersModelSettings +} + +func (s *HoltWintersModelSettings) HoltWintersModelSettingsCaster() *HoltWintersModelSettings { + return s +} diff --git a/typedapi/types/holtwintersmovingaverageaggregation.go b/typedapi/types/holtwintersmovingaverageaggregation.go index 1a34d75b2a..335a8902a4 100644 --- a/typedapi/types/holtwintersmovingaverageaggregation.go +++ b/typedapi/types/holtwintersmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // HoltWintersMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L288-L291 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L288-L291 type HoltWintersMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewHoltWintersMovingAverageAggregation() *HoltWintersMovingAverageAggregati return r } + +// true + +type HoltWintersMovingAverageAggregationVariant interface { + HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation +} + +func (s *HoltWintersMovingAverageAggregation) HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation { + return s +} diff --git a/typedapi/types/hop.go b/typedapi/types/hop.go index 146024e573..cf7ca6c40e 100644 --- a/typedapi/types/hop.go +++ b/typedapi/types/hop.go @@ -16,20 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Hop type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/Hop.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/Hop.ts#L23-L36 type Hop struct { // Connections Specifies one or more fields from which you want to extract terms that are // associated with the specified vertices. Connections *Hop `json:"connections,omitempty"` // Query An optional guiding query that constrains the Graph API as it explores // connected terms. - Query Query `json:"query"` + Query *Query `json:"query,omitempty"` // Vertices Contains the fields you are interested in. 
Vertices []VertexDefinition `json:"vertices"` } @@ -40,3 +40,13 @@ func NewHop() *Hop { return r } + +// true + +type HopVariant interface { + HopCaster() *Hop +} + +func (s *Hop) HopCaster() *Hop { + return s +} diff --git a/typedapi/types/hourandminute.go b/typedapi/types/hourandminute.go index ef8a32498c..6c5cfa6f95 100644 --- a/typedapi/types/hourandminute.go +++ b/typedapi/types/hourandminute.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // HourAndMinute type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L105-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L106-L109 type HourAndMinute struct { Hour []int `json:"hour"` Minute []int `json:"minute"` @@ -34,3 +34,13 @@ func NewHourAndMinute() *HourAndMinute { return r } + +// true + +type HourAndMinuteVariant interface { + HourAndMinuteCaster() *HourAndMinute +} + +func (s *HourAndMinute) HourAndMinuteCaster() *HourAndMinute { + return s +} diff --git a/typedapi/types/hourlyschedule.go b/typedapi/types/hourlyschedule.go index cbd21c3a51..4dfceca05d 100644 --- a/typedapi/types/hourlyschedule.go +++ b/typedapi/types/hourlyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // HourlySchedule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L47-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L47-L49 type HourlySchedule struct { Minute []int `json:"minute"` } @@ -33,3 +33,13 @@ func NewHourlySchedule() *HourlySchedule { return r } + +// true + +type HourlyScheduleVariant interface { + HourlyScheduleCaster() *HourlySchedule +} + +func (s *HourlySchedule) HourlyScheduleCaster() *HourlySchedule { + return s +} diff --git a/typedapi/types/htmlstripcharfilter.go b/typedapi/types/htmlstripcharfilter.go index aeaa02c254..83c0cc4f85 100644 --- a/typedapi/types/htmlstripcharfilter.go +++ b/typedapi/types/htmlstripcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HtmlStripCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/char_filters.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/char_filters.ts#L46-L49 type HtmlStripCharFilter struct { EscapedTags []string `json:"escaped_tags,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewHtmlStripCharFilter() *HtmlStripCharFilter { return r } + +// true + +type HtmlStripCharFilterVariant interface { + HtmlStripCharFilterCaster() *HtmlStripCharFilter +} + +func (s *HtmlStripCharFilter) HtmlStripCharFilterCaster() *HtmlStripCharFilter { + return s +} diff --git a/typedapi/types/htmlstripprocessor.go b/typedapi/types/htmlstripprocessor.go index 14fb37c953..268053e468 100644 --- a/typedapi/types/htmlstripprocessor.go +++ b/typedapi/types/htmlstripprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HtmlStripProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L969-L985 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1010-L1026 type HtmlStripProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type HtmlStripProcessor struct { // Field The string-valued field to remove HTML tags from. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -88,16 +88,9 @@ func (s *HtmlStripProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewHtmlStripProcessor() *HtmlStripProcessor { return r } + +// true + +type HtmlStripProcessorVariant interface { + HtmlStripProcessorCaster() *HtmlStripProcessor +} + +func (s *HtmlStripProcessor) HtmlStripProcessorCaster() *HtmlStripProcessor { + return s +} diff --git a/typedapi/types/http.go b/typedapi/types/http.go index 63cdacb4f1..bee00a2aff 100644 --- a/typedapi/types/http.go +++ b/typedapi/types/http.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Http type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L669-L688 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L669-L688 type Http struct { // Clients Information on current and recently-closed HTTP client connections. 
// Clients that have been closed longer than the @@ -113,8 +113,10 @@ func (s *Http) UnmarshalJSON(data []byte) error { // NewHttp returns a Http. func NewHttp() *Http { r := &Http{ - Routes: make(map[string]HttpRoute, 0), + Routes: make(map[string]HttpRoute), } return r } + +// false diff --git a/typedapi/types/httpemailattachment.go b/typedapi/types/httpemailattachment.go index 9f59526989..c004b60b2f 100644 --- a/typedapi/types/httpemailattachment.go +++ b/typedapi/types/httpemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HttpEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L218-L222 type HttpEmailAttachment struct { ContentType *string `json:"content_type,omitempty"` Inline *bool `json:"inline,omitempty"` @@ -95,3 +95,13 @@ func NewHttpEmailAttachment() *HttpEmailAttachment { return r } + +// true + +type HttpEmailAttachmentVariant interface { + HttpEmailAttachmentCaster() *HttpEmailAttachment +} + +func (s *HttpEmailAttachment) HttpEmailAttachmentCaster() *HttpEmailAttachment { + return s +} diff --git a/typedapi/types/httpheaders.go b/typedapi/types/httpheaders.go index 18f13f8a4c..2c294ee02f 100644 --- a/typedapi/types/httpheaders.go +++ b/typedapi/types/httpheaders.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // HttpHeaders type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L159-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L155-L155 type HttpHeaders map[string][]string + +type HttpHeadersVariant interface { + HttpHeadersCaster() *HttpHeaders +} diff --git a/typedapi/types/httpinput.go b/typedapi/types/httpinput.go index 3034139d9b..215efb7ed7 100644 --- a/typedapi/types/httpinput.go +++ b/typedapi/types/httpinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // HttpInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L44-L48 type HttpInput struct { Extract []string `json:"extract,omitempty"` Request *HttpInputRequestDefinition `json:"request,omitempty"` @@ -39,3 +39,13 @@ func NewHttpInput() *HttpInput { return r } + +// true + +type HttpInputVariant interface { + HttpInputCaster() *HttpInput +} + +func (s *HttpInput) HttpInputCaster() *HttpInput { + return s +} diff --git a/typedapi/types/httpinputauthentication.go b/typedapi/types/httpinputauthentication.go index 47d37696c0..6af29787e7 100644 --- a/typedapi/types/httpinputauthentication.go +++ b/typedapi/types/httpinputauthentication.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // HttpInputAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L50-L52 type HttpInputAuthentication struct { Basic HttpInputBasicAuthentication `json:"basic"` } @@ -33,3 +33,13 @@ func NewHttpInputAuthentication() *HttpInputAuthentication { return r } + +// true + +type HttpInputAuthenticationVariant interface { + HttpInputAuthenticationCaster() *HttpInputAuthentication +} + +func (s *HttpInputAuthentication) HttpInputAuthenticationCaster() *HttpInputAuthentication { + return s +} diff --git a/typedapi/types/httpinputbasicauthentication.go b/typedapi/types/httpinputbasicauthentication.go index f91742e690..88b117ef46 100644 --- a/typedapi/types/httpinputbasicauthentication.go +++ b/typedapi/types/httpinputbasicauthentication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HttpInputBasicAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L54-L57 type HttpInputBasicAuthentication struct { Password string `json:"password"` Username string `json:"username"` @@ -72,3 +72,13 @@ func NewHttpInputBasicAuthentication() *HttpInputBasicAuthentication { return r } + +// true + +type HttpInputBasicAuthenticationVariant interface { + HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication +} + +func (s *HttpInputBasicAuthentication) HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication { + return s +} diff --git a/typedapi/types/httpinputproxy.go b/typedapi/types/httpinputproxy.go index f3ccd0772b..c8b923662c 100644 --- a/typedapi/types/httpinputproxy.go +++ b/typedapi/types/httpinputproxy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // HttpInputProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L67-L70 type HttpInputProxy struct { Host string `json:"host"` Port uint `json:"port"` @@ -72,3 +72,13 @@ func NewHttpInputProxy() *HttpInputProxy { return r } + +// true + +type HttpInputProxyVariant interface { + HttpInputProxyCaster() *HttpInputProxy +} + +func (s *HttpInputProxy) HttpInputProxyCaster() *HttpInputProxy { + return s +} diff --git a/typedapi/types/httpinputrequestdefinition.go b/typedapi/types/httpinputrequestdefinition.go index 9ef214d4b2..a1c2d83607 100644 --- a/typedapi/types/httpinputrequestdefinition.go +++ b/typedapi/types/httpinputrequestdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // HttpInputRequestDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L72-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L72-L86 type HttpInputRequestDefinition struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,19 @@ func (s *HttpInputRequestDefinition) UnmarshalJSON(data []byte) error { // NewHttpInputRequestDefinition returns a HttpInputRequestDefinition. 
func NewHttpInputRequestDefinition() *HttpInputRequestDefinition { r := &HttpInputRequestDefinition{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// true + +type HttpInputRequestDefinitionVariant interface { + HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition +} + +func (s *HttpInputRequestDefinition) HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition { + return s +} diff --git a/typedapi/types/httpinputrequestresult.go b/typedapi/types/httpinputrequestresult.go index a574e20e23..0c0db22075 100644 --- a/typedapi/types/httpinputrequestresult.go +++ b/typedapi/types/httpinputrequestresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // HttpInputRequestResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L300-L300 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L300-L300 type HttpInputRequestResult struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,11 @@ func (s *HttpInputRequestResult) UnmarshalJSON(data []byte) error { // NewHttpInputRequestResult returns a HttpInputRequestResult. 
func NewHttpInputRequestResult() *HttpInputRequestResult { r := &HttpInputRequestResult{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/httpinputresponseresult.go b/typedapi/types/httpinputresponseresult.go index 887c9c0fb7..b4734616b9 100644 --- a/typedapi/types/httpinputresponseresult.go +++ b/typedapi/types/httpinputresponseresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HttpInputResponseResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L302-L306 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L302-L306 type HttpInputResponseResult struct { Body string `json:"body"` Headers HttpHeaders `json:"headers"` @@ -97,3 +97,5 @@ func NewHttpInputResponseResult() *HttpInputResponseResult { return r } + +// false diff --git a/typedapi/types/httproute.go b/typedapi/types/httproute.go index b1001cd067..5b17ee6ba9 100644 --- a/typedapi/types/httproute.go +++ b/typedapi/types/httproute.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // HttpRoute type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L690-L693 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L690-L693 type HttpRoute struct { Requests HttpRouteRequests `json:"requests"` Responses HttpRouteResponses `json:"responses"` @@ -34,3 +34,5 @@ func NewHttpRoute() *HttpRoute { return r } + +// false diff --git a/typedapi/types/httprouterequests.go b/typedapi/types/httprouterequests.go index 50ccf679f4..215a6d7fcf 100644 --- a/typedapi/types/httprouterequests.go +++ b/typedapi/types/httprouterequests.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HttpRouteRequests type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L695-L699 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L695-L699 type HttpRouteRequests struct { Count int64 `json:"count"` SizeHistogram []SizeHttpHistogram `json:"size_histogram"` @@ -99,3 +99,5 @@ func NewHttpRouteRequests() *HttpRouteRequests { return r } + +// false diff --git a/typedapi/types/httprouteresponses.go b/typedapi/types/httprouteresponses.go index 1c5d373e63..47a81fb806 100644 --- a/typedapi/types/httprouteresponses.go +++ b/typedapi/types/httprouteresponses.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HttpRouteResponses type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L701-L706 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L701-L706 type HttpRouteResponses struct { Count int64 `json:"count"` HandlingTimeHistogram []TimeHttpHistogram `json:"handling_time_histogram"` @@ -105,3 +105,5 @@ func NewHttpRouteResponses() *HttpRouteResponses { return r } + +// false diff --git a/typedapi/types/hungariananalyzer.go b/typedapi/types/hungariananalyzer.go index 40772e382d..d1ca999aa4 100644 --- a/typedapi/types/hungariananalyzer.go +++ b/typedapi/types/hungariananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HungarianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L195-L200 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L206-L211 type HungarianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewHungarianAnalyzer() *HungarianAnalyzer { return r } + +// true + +type HungarianAnalyzerVariant interface { + HungarianAnalyzerCaster() *HungarianAnalyzer +} + +func (s *HungarianAnalyzer) HungarianAnalyzerCaster() *HungarianAnalyzer { + return s +} diff --git a/typedapi/types/hunspelltokenfilter.go b/typedapi/types/hunspelltokenfilter.go index e5b53ce54e..87c2a86b0f 100644 --- a/typedapi/types/hunspelltokenfilter.go +++ b/typedapi/types/hunspelltokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HunspellTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L201-L207 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L201-L207 type HunspellTokenFilter struct { Dedup *bool `json:"dedup,omitempty"` Dictionary *string `json:"dictionary,omitempty"` @@ -146,3 +146,13 @@ func NewHunspellTokenFilter() *HunspellTokenFilter { return r } + +// true + +type HunspellTokenFilterVariant interface { + HunspellTokenFilterCaster() *HunspellTokenFilter +} + +func (s *HunspellTokenFilter) HunspellTokenFilterCaster() *HunspellTokenFilter { + return s +} diff --git a/typedapi/types/hyperparameter.go b/typedapi/types/hyperparameter.go index 22e762228e..1273d4db2d 100644 --- a/typedapi/types/hyperparameter.go +++ b/typedapi/types/hyperparameter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Hyperparameter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L216-L230 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L273-L287 type Hyperparameter struct { // AbsoluteImportance A positive number showing how much the parameter influences the variation of // the loss function. 
For hyperparameters with values that are not specified by @@ -144,3 +144,5 @@ func NewHyperparameter() *Hyperparameter { return r } + +// false diff --git a/typedapi/types/hyperparameters.go b/typedapi/types/hyperparameters.go index a68c0e0c5d..278ce48be9 100644 --- a/typedapi/types/hyperparameters.go +++ b/typedapi/types/hyperparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Hyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L418-L524 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L420-L526 type Hyperparameters struct { // Alpha Advanced configuration option. // Machine learning uses loss guided tree growing, which means that the decision @@ -382,3 +382,5 @@ func NewHyperparameters() *Hyperparameters { return r } + +// false diff --git a/typedapi/types/hyphenationdecompoundertokenfilter.go b/typedapi/types/hyphenationdecompoundertokenfilter.go index 2adc628d31..a97b83c9e3 100644 --- a/typedapi/types/hyphenationdecompoundertokenfilter.go +++ b/typedapi/types/hyphenationdecompoundertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // HyphenationDecompounderTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L57-L59 type HyphenationDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -191,3 +191,13 @@ func NewHyphenationDecompounderTokenFilter() *HyphenationDecompounderTokenFilter return r } + +// true + +type HyphenationDecompounderTokenFilterVariant interface { + HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter +} + +func (s *HyphenationDecompounderTokenFilter) HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter { + return s +} diff --git a/typedapi/types/icuanalyzer.go b/typedapi/types/icuanalyzer.go index 591586c8e2..5f6d9c7047 100644 --- a/typedapi/types/icuanalyzer.go +++ b/typedapi/types/icuanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // IcuAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L67-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L67-L71 type IcuAnalyzer struct { Method icunormalizationtype.IcuNormalizationType `json:"method"` Mode icunormalizationmode.IcuNormalizationMode `json:"mode"` @@ -56,3 +56,13 @@ func NewIcuAnalyzer() *IcuAnalyzer { return r } + +// true + +type IcuAnalyzerVariant interface { + IcuAnalyzerCaster() *IcuAnalyzer +} + +func (s *IcuAnalyzer) IcuAnalyzerCaster() *IcuAnalyzer { + return s +} diff --git a/typedapi/types/icucollationproperty.go b/typedapi/types/icucollationproperty.go index 7ef49d242f..cebb4d1c1b 100644 --- a/typedapi/types/icucollationproperty.go +++ b/typedapi/types/icucollationproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,11 +34,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IcuCollationProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L94-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L103-L127 type IcuCollationProperty struct { Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"case_first,omitempty"` @@ -60,15 +61,16 @@ type IcuCollationProperty struct { Norms *bool `json:"norms,omitempty"` // NullValue Accepts a string value which is substituted for any explicit null values. // Defaults to null, which means the field is treated as missing. - NullValue *string `json:"null_value,omitempty"` - Numeric *bool `json:"numeric,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Rules *string `json:"rules,omitempty"` - Store *bool `json:"store,omitempty"` - Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` - Type string `json:"type,omitempty"` - VariableTop *string `json:"variable_top,omitempty"` - Variant *string `json:"variant,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Numeric *bool `json:"numeric,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Rules *string `json:"rules,omitempty"` + Store *bool `json:"store,omitempty"` + Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + VariableTop *string `json:"variable_top,omitempty"` + Variant *string `json:"variant,omitempty"` } func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { @@ -181,301 +183,313 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := 
NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo 
:= NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -609,301 +623,313 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -940,6 +966,11 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Strength", err) } + case 
"synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -1000,6 +1031,7 @@ func (s IcuCollationProperty) MarshalJSON() ([]byte, error) { Rules: s.Rules, Store: s.Store, Strength: s.Strength, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, VariableTop: s.VariableTop, Variant: s.Variant, @@ -1013,10 +1045,20 @@ func (s IcuCollationProperty) MarshalJSON() ([]byte, error) { // NewIcuCollationProperty returns a IcuCollationProperty. func NewIcuCollationProperty() *IcuCollationProperty { r := &IcuCollationProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IcuCollationPropertyVariant interface { + IcuCollationPropertyCaster() *IcuCollationProperty +} + +func (s *IcuCollationProperty) IcuCollationPropertyCaster() *IcuCollationProperty { + return s +} diff --git a/typedapi/types/icucollationtokenfilter.go b/typedapi/types/icucollationtokenfilter.go index a112bbee0a..e0f52de21a 100644 --- a/typedapi/types/icucollationtokenfilter.go +++ b/typedapi/types/icucollationtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,7 +36,7 @@ import ( // IcuCollationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L51-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L51-L65 type IcuCollationTokenFilter struct { Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"case_first,omitempty"` @@ -237,3 +237,13 @@ func NewIcuCollationTokenFilter() *IcuCollationTokenFilter { return r } + +// true + +type IcuCollationTokenFilterVariant interface { + IcuCollationTokenFilterCaster() *IcuCollationTokenFilter +} + +func (s *IcuCollationTokenFilter) IcuCollationTokenFilterCaster() *IcuCollationTokenFilter { + return s +} diff --git a/typedapi/types/icufoldingtokenfilter.go b/typedapi/types/icufoldingtokenfilter.go index 0a12401cd6..dcce8b7188 100644 --- a/typedapi/types/icufoldingtokenfilter.go +++ b/typedapi/types/icufoldingtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IcuFoldingTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L46-L49 type IcuFoldingTokenFilter struct { Type string `json:"type,omitempty"` UnicodeSetFilter string `json:"unicode_set_filter"` @@ -100,3 +100,13 @@ func NewIcuFoldingTokenFilter() *IcuFoldingTokenFilter { return r } + +// true + +type IcuFoldingTokenFilterVariant interface { + IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter +} + +func (s *IcuFoldingTokenFilter) IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter { + return s +} diff --git a/typedapi/types/icunormalizationcharfilter.go b/typedapi/types/icunormalizationcharfilter.go index 3ccf46b832..ae19a2a788 100644 --- a/typedapi/types/icunormalizationcharfilter.go +++ b/typedapi/types/icunormalizationcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IcuNormalizationCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L40-L44 type IcuNormalizationCharFilter struct { Mode *icunormalizationmode.IcuNormalizationMode `json:"mode,omitempty"` Name *icunormalizationtype.IcuNormalizationType `json:"name,omitempty"` @@ -102,3 +102,13 @@ func NewIcuNormalizationCharFilter() *IcuNormalizationCharFilter { return r } + +// true + +type IcuNormalizationCharFilterVariant interface { + IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter +} + +func (s *IcuNormalizationCharFilter) IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter { + return s +} diff --git a/typedapi/types/icunormalizationtokenfilter.go b/typedapi/types/icunormalizationtokenfilter.go index bfbab53f29..14badf5584 100644 --- a/typedapi/types/icunormalizationtokenfilter.go +++ b/typedapi/types/icunormalizationtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // IcuNormalizationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L35-L38 type IcuNormalizationTokenFilter struct { Name icunormalizationtype.IcuNormalizationType `json:"name"` Type string `json:"type,omitempty"` @@ -94,3 +94,13 @@ func NewIcuNormalizationTokenFilter() *IcuNormalizationTokenFilter { return r } + +// true + +type IcuNormalizationTokenFilterVariant interface { + IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter +} + +func (s *IcuNormalizationTokenFilter) IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter { + return s +} diff --git a/typedapi/types/icutokenizer.go b/typedapi/types/icutokenizer.go index 3af71d4a31..ce76659a66 100644 --- a/typedapi/types/icutokenizer.go +++ b/typedapi/types/icutokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IcuTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L30-L33 type IcuTokenizer struct { RuleFiles string `json:"rule_files"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewIcuTokenizer() *IcuTokenizer { return r } + +// true + +type IcuTokenizerVariant interface { + IcuTokenizerCaster() *IcuTokenizer +} + +func (s *IcuTokenizer) IcuTokenizerCaster() *IcuTokenizer { + return s +} diff --git a/typedapi/types/icutransformtokenfilter.go b/typedapi/types/icutransformtokenfilter.go index a473c21225..78df2a9684 100644 --- a/typedapi/types/icutransformtokenfilter.go +++ b/typedapi/types/icutransformtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IcuTransformTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/icu-plugin.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/icu-plugin.ts#L24-L28 type IcuTransformTokenFilter struct { Dir *icutransformdirection.IcuTransformDirection `json:"dir,omitempty"` Id string `json:"id"` @@ -109,3 +109,13 @@ func NewIcuTransformTokenFilter() *IcuTransformTokenFilter { return r } + +// true + +type IcuTransformTokenFilterVariant interface { + IcuTransformTokenFilterCaster() *IcuTransformTokenFilter +} + +func (s *IcuTransformTokenFilter) IcuTransformTokenFilterCaster() *IcuTransformTokenFilter { + return s +} diff --git a/typedapi/types/ids.go b/typedapi/types/ids.go index dbe4bc9e7a..7a6877c4a4 100644 --- a/typedapi/types/ids.go +++ b/typedapi/types/ids.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Ids type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L62-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L56-L56 type Ids []string + +type IdsVariant interface { + IdsCaster() *Ids +} diff --git a/typedapi/types/idsquery.go b/typedapi/types/idsquery.go index db53d8dba2..af394acc81 100644 --- a/typedapi/types/idsquery.go +++ b/typedapi/types/idsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IdsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L88-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L88-L96 type IdsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -114,3 +114,13 @@ func NewIdsQuery() *IdsQuery { return r } + +// true + +type IdsQueryVariant interface { + IdsQueryCaster() *IdsQuery +} + +func (s *IdsQuery) IdsQueryCaster() *IdsQuery { + return s +} diff --git a/typedapi/types/ilm.go b/typedapi/types/ilm.go index d654585f4e..7506e3eb03 100644 --- a/typedapi/types/ilm.go +++ b/typedapi/types/ilm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Ilm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L160-L163 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L172-L175 type Ilm struct { PolicyCount int `json:"policy_count"` PolicyStats []IlmPolicyStatistics `json:"policy_stats"` @@ -84,3 +84,5 @@ func NewIlm() *Ilm { return r } + +// false diff --git a/typedapi/types/ilmactions.go b/typedapi/types/ilmactions.go index 63a996333d..cc29d7d17b 100644 --- a/typedapi/types/ilmactions.go +++ b/typedapi/types/ilmactions.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IlmActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L42-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L39-L93 type IlmActions struct { // Allocate Phases allowed: warm, cold. Allocate *AllocateAction `json:"allocate,omitempty"` @@ -58,3 +58,13 @@ func NewIlmActions() *IlmActions { return r } + +// true + +type IlmActionsVariant interface { + IlmActionsCaster() *IlmActions +} + +func (s *IlmActions) IlmActionsCaster() *IlmActions { + return s +} diff --git a/typedapi/types/ilmindicator.go b/typedapi/types/ilmindicator.go index 3058310d13..43e278abd4 100644 --- a/typedapi/types/ilmindicator.go +++ b/typedapi/types/ilmindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IlmIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L163-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L164-L168 type IlmIndicator struct { Details *IlmIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewIlmIndicator() *IlmIndicator { return r } + +// false diff --git a/typedapi/types/ilmindicatordetails.go b/typedapi/types/ilmindicatordetails.go index 0aa666bcef..381797e74b 100644 --- a/typedapi/types/ilmindicatordetails.go +++ b/typedapi/types/ilmindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IlmIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L168-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L169-L173 type IlmIndicatorDetails struct { IlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"ilm_status"` Policies int64 `json:"policies"` @@ -102,3 +102,5 @@ func NewIlmIndicatorDetails() *IlmIndicatorDetails { return r } + +// false diff --git a/typedapi/types/ilmpolicy.go b/typedapi/types/ilmpolicy.go index c8c1b99f55..717210072d 100644 --- a/typedapi/types/ilmpolicy.go +++ b/typedapi/types/ilmpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,10 @@ import ( // IlmPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Policy.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Policy.ts#L23-L29 type IlmPolicy struct { + // Meta_ Arbitrary metadata that is not automatically generated or used by + // Elasticsearch. 
Meta_ Metadata `json:"_meta,omitempty"` Phases Phases `json:"phases"` } @@ -72,3 +74,13 @@ func NewIlmPolicy() *IlmPolicy { return r } + +// true + +type IlmPolicyVariant interface { + IlmPolicyCaster() *IlmPolicy +} + +func (s *IlmPolicy) IlmPolicyCaster() *IlmPolicy { + return s +} diff --git a/typedapi/types/ilmpolicystatistics.go b/typedapi/types/ilmpolicystatistics.go index 5fa7d03c4d..35082731d3 100644 --- a/typedapi/types/ilmpolicystatistics.go +++ b/typedapi/types/ilmpolicystatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,10 +31,10 @@ import ( // IlmPolicyStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L155-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L167-L170 type IlmPolicyStatistics struct { - IndicesManaged int `json:"indices_managed"` - Phases Phases `json:"phases"` + IndicesManaged int `json:"indices_managed"` + Phases UsagePhases `json:"phases"` } func (s *IlmPolicyStatistics) UnmarshalJSON(data []byte) error { @@ -84,3 +84,5 @@ func NewIlmPolicyStatistics() *IlmPolicyStatistics { return r } + +// false diff --git a/typedapi/types/impact.go b/typedapi/types/impact.go index fb9075c208..a9bcd4983f 100644 --- a/typedapi/types/impact.go +++ b/typedapi/types/impact.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Impact type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L66-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L67-L72 type Impact struct { Description string `json:"description"` Id string `json:"id"` @@ -112,3 +112,5 @@ func NewImpact() *Impact { return r } + +// false diff --git a/typedapi/types/includedinvalidation.go b/typedapi/types/includedinvalidation.go index 73ea969aac..439db973be 100644 --- a/typedapi/types/includedinvalidation.go +++ b/typedapi/types/includedinvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // IncludedInValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L73-L76 type IncludedInValidation struct { Constraint []ScalarValue `json:"constraint"` Type string `json:"type,omitempty"` @@ -51,3 +51,13 @@ func NewIncludedInValidation() *IncludedInValidation { return r } + +// true + +type IncludedInValidationVariant interface { + IncludedInValidationCaster() *IncludedInValidation +} + +func (s *IncludedInValidation) IncludedInValidationCaster() *IncludedInValidation { + return s +} diff --git a/typedapi/types/indexaction.go b/typedapi/types/indexaction.go index d2d94612f6..f76422ceb5 100644 --- a/typedapi/types/indexaction.go +++ b/typedapi/types/indexaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndexAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L256-L265 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L256-L265 type IndexAction struct { DocId *string `json:"doc_id,omitempty"` ExecutionTimeField *string `json:"execution_time_field,omitempty"` @@ -99,3 +99,13 @@ func NewIndexAction() *IndexAction { return r } + +// true + +type IndexActionVariant interface { + IndexActionCaster() *IndexAction +} + +func (s *IndexAction) IndexActionCaster() *IndexAction { + return s +} diff --git a/typedapi/types/indexaliases.go b/typedapi/types/indexaliases.go index 9bfa787a8f..9883e218a7 100644 --- a/typedapi/types/indexaliases.go +++ b/typedapi/types/indexaliases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexAliases type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_alias/IndicesGetAliasResponse.ts#L37-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_alias/IndicesGetAliasResponse.ts#L37-L39 type IndexAliases struct { Aliases map[string]AliasDefinition `json:"aliases"` } @@ -30,8 +30,10 @@ type IndexAliases struct { // NewIndexAliases returns a IndexAliases. 
func NewIndexAliases() *IndexAliases { r := &IndexAliases{ - Aliases: make(map[string]AliasDefinition, 0), + Aliases: make(map[string]AliasDefinition), } return r } + +// false diff --git a/typedapi/types/indexanddatastreamaction.go b/typedapi/types/indexanddatastreamaction.go index 67292ee07c..b47f8718ba 100644 --- a/typedapi/types/indexanddatastreamaction.go +++ b/typedapi/types/indexanddatastreamaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexAndDataStreamAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/modify_data_stream/types.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/modify_data_stream/types.ts#L39-L44 type IndexAndDataStreamAction struct { // DataStream Data stream targeted by the action. DataStream string `json:"data_stream"` @@ -74,3 +74,13 @@ func NewIndexAndDataStreamAction() *IndexAndDataStreamAction { return r } + +// true + +type IndexAndDataStreamActionVariant interface { + IndexAndDataStreamActionCaster() *IndexAndDataStreamAction +} + +func (s *IndexAndDataStreamAction) IndexAndDataStreamActionCaster() *IndexAndDataStreamAction { + return s +} diff --git a/typedapi/types/indexcapabilities.go b/typedapi/types/indexcapabilities.go index b72a20beb4..e568af9e4a 100644 --- a/typedapi/types/indexcapabilities.go +++ b/typedapi/types/indexcapabilities.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 type IndexCapabilities struct { RollupJobs []RollupJobSummary `json:"rollup_jobs"` } @@ -33,3 +33,5 @@ func NewIndexCapabilities() *IndexCapabilities { return r } + +// false diff --git a/typedapi/types/indexdetails.go b/typedapi/types/indexdetails.go index e5b0317644..cbad79f433 100644 --- a/typedapi/types/indexdetails.go +++ b/typedapi/types/indexdetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 type IndexDetails struct { MaxSegmentsPerShard int64 `json:"max_segments_per_shard"` ShardCount int `json:"shard_count"` @@ -116,3 +116,5 @@ func NewIndexDetails() *IndexDetails { return r } + +// false diff --git a/typedapi/types/indexfield.go b/typedapi/types/indexfield.go index 662d45bf2a..5eeb9aa22f 100644 --- a/typedapi/types/indexfield.go +++ b/typedapi/types/indexfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L46-L48 type IndexField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewIndexField() *IndexField { return r } + +// true + +type IndexFieldVariant interface { + IndexFieldCaster() *IndexField +} + +func (s *IndexField) IndexFieldCaster() *IndexField { + return s +} diff --git a/typedapi/types/indexhealthstats.go b/typedapi/types/indexhealthstats.go index b3c221038a..4fa31a604f 100644 --- a/typedapi/types/indexhealthstats.go +++ b/typedapi/types/indexhealthstats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndexHealthStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/health/types.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/health/types.ts#L24-L35 type IndexHealthStats struct { ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` @@ -211,8 +211,10 @@ func (s *IndexHealthStats) UnmarshalJSON(data []byte) error { // NewIndexHealthStats returns a IndexHealthStats. func NewIndexHealthStats() *IndexHealthStats { r := &IndexHealthStats{ - Shards: make(map[string]ShardHealthStats, 0), + Shards: make(map[string]ShardHealthStats), } return r } + +// false diff --git a/typedapi/types/indexingpressurememorysummary.go b/typedapi/types/indexingpressurememorysummary.go index eb16ae26a1..f484ade0a1 100644 --- a/typedapi/types/indexingpressurememorysummary.go +++ b/typedapi/types/indexingpressurememorysummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexingPressureMemorySummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L580-L589 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L580-L589 type IndexingPressureMemorySummary struct { AllInBytes int64 `json:"all_in_bytes"` CombinedCoordinatingAndPrimaryInBytes int64 `json:"combined_coordinating_and_primary_in_bytes"` @@ -189,3 +189,5 @@ func NewIndexingPressureMemorySummary() *IndexingPressureMemorySummary { return r } + +// false diff --git a/typedapi/types/indexingslowlogsettings.go b/typedapi/types/indexingslowlogsettings.go index 7b1ef4d035..1498319793 100644 --- a/typedapi/types/indexingslowlogsettings.go +++ b/typedapi/types/indexingslowlogsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexingSlowlogSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L563-L568 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L588-L593 type IndexingSlowlogSettings struct { Level *string `json:"level,omitempty"` Reformat *bool `json:"reformat,omitempty"` @@ -112,3 +112,13 @@ func NewIndexingSlowlogSettings() *IndexingSlowlogSettings { return r } + +// true + +type IndexingSlowlogSettingsVariant interface { + IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings +} + +func (s *IndexingSlowlogSettings) IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings { + return s +} diff --git a/typedapi/types/indexingslowlogtresholds.go b/typedapi/types/indexingslowlogtresholds.go index 6ad869cae7..651e366e02 100644 --- a/typedapi/types/indexingslowlogtresholds.go +++ b/typedapi/types/indexingslowlogtresholds.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexingSlowlogTresholds type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L570-L577 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L595-L602 type IndexingSlowlogTresholds struct { // Index The indexing slow log, similar in functionality to the search slow log. The // log file name ends with `_index_indexing_slowlog.json`. 
@@ -36,3 +36,13 @@ func NewIndexingSlowlogTresholds() *IndexingSlowlogTresholds { return r } + +// true + +type IndexingSlowlogTresholdsVariant interface { + IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds +} + +func (s *IndexingSlowlogTresholds) IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds { + return s +} diff --git a/typedapi/types/indexingstats.go b/typedapi/types/indexingstats.go index 21b787b45c..7bce4191d2 100644 --- a/typedapi/types/indexingstats.go +++ b/typedapi/types/indexingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L143-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L146-L162 type IndexingStats struct { DeleteCurrent int64 `json:"delete_current"` DeleteTime Duration `json:"delete_time,omitempty"` @@ -231,8 +231,10 @@ func (s *IndexingStats) UnmarshalJSON(data []byte) error { // NewIndexingStats returns a IndexingStats. func NewIndexingStats() *IndexingStats { r := &IndexingStats{ - Types: make(map[string]IndexingStats, 0), + Types: make(map[string]IndexingStats), } return r } + +// false diff --git a/typedapi/types/indexmappingrecord.go b/typedapi/types/indexmappingrecord.go index 788ef9db8e..ed271f7a99 100644 --- a/typedapi/types/indexmappingrecord.go +++ b/typedapi/types/indexmappingrecord.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexMappingRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32 type IndexMappingRecord struct { Item *TypeMapping `json:"item,omitempty"` Mappings TypeMapping `json:"mappings"` @@ -34,3 +34,5 @@ func NewIndexMappingRecord() *IndexMappingRecord { return r } + +// false diff --git a/typedapi/types/indexoperation.go b/typedapi/types/indexoperation.go index 44c991a898..202b0dd3b8 100644 --- a/typedapi/types/indexoperation.go +++ b/typedapi/types/indexoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,30 +33,30 @@ import ( // IndexOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L132-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L142-L142 type IndexOperation struct { // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. - // If a name matches a dynamic template, then that template will be applied + // It defaults to an empty map. 
+ // If a name matches a dynamic template, that template will be applied // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be + // If a field is already defined in the mapping, then this parameter won't be // used. DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value + // Pipeline The ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, setting the value to + // `_none` turns off the default ingest pipeline for this request. + // If a final pipeline is configured, it will always run regardless of the value // of this parameter. Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. + // RequireAlias If `true`, the request's actions must target an index alias. RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. 
Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -164,8 +164,18 @@ func (s *IndexOperation) UnmarshalJSON(data []byte) error { // NewIndexOperation returns a IndexOperation. func NewIndexOperation() *IndexOperation { r := &IndexOperation{ - DynamicTemplates: make(map[string]string, 0), + DynamicTemplates: make(map[string]string), } return r } + +// true + +type IndexOperationVariant interface { + IndexOperationCaster() *IndexOperation +} + +func (s *IndexOperation) IndexOperationCaster() *IndexOperation { + return s +} diff --git a/typedapi/types/indexprivilegescheck.go b/typedapi/types/indexprivilegescheck.go index 2e6b218b66..62b0406661 100644 --- a/typedapi/types/indexprivilegescheck.go +++ b/typedapi/types/indexprivilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,16 +33,16 @@ import ( // IndexPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/types.ts#L33-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/types.ts#L34-L45 type IndexPrivilegesCheck struct { - // AllowRestrictedIndices This needs to be set to true (default is false) if using wildcards or regexps - // for patterns that cover restricted indices. + // AllowRestrictedIndices This needs to be set to `true` (default is `false`) if using wildcards or + // regexps for patterns that cover restricted indices. 
// Implicitly, restricted indices do not match index patterns because restricted // indices usually have limited privileges and including them in pattern tests // would render most such tests false. // If restricted indices are explicitly included in the names list, privileges // will be checked against them regardless of the value of - // allow_restricted_indices. + // `allow_restricted_indices`. AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` // Names A list of indices. Names []string `json:"names"` @@ -111,3 +111,13 @@ func NewIndexPrivilegesCheck() *IndexPrivilegesCheck { return r } + +// true + +type IndexPrivilegesCheckVariant interface { + IndexPrivilegesCheckCaster() *IndexPrivilegesCheck +} + +func (s *IndexPrivilegesCheck) IndexPrivilegesCheckCaster() *IndexPrivilegesCheck { + return s +} diff --git a/typedapi/types/indexresult.go b/typedapi/types/indexresult.go index 8afa606eef..5a04f5ccec 100644 --- a/typedapi/types/indexresult.go +++ b/typedapi/types/indexresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L267-L269 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L267-L269 type IndexResult struct { Response IndexResultSummary `json:"response"` } @@ -33,3 +33,5 @@ func NewIndexResult() *IndexResult { return r } + +// false diff --git a/typedapi/types/indexresultsummary.go b/typedapi/types/indexresultsummary.go index 953c301e29..85d37d246b 100644 --- a/typedapi/types/indexresultsummary.go +++ b/typedapi/types/indexresultsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndexResultSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L271-L277 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L271-L277 type IndexResultSummary struct { Created bool `json:"created"` Id string `json:"id"` @@ -102,3 +102,5 @@ func NewIndexResultSummary() *IndexResultSummary { return r } + +// false diff --git a/typedapi/types/indexrouting.go b/typedapi/types/indexrouting.go index 0c48f56e86..5a5cf7d948 100644 --- a/typedapi/types/indexrouting.go +++ b/typedapi/types/indexrouting.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L22-L25 type IndexRouting struct { Allocation *IndexRoutingAllocation `json:"allocation,omitempty"` Rebalance *IndexRoutingRebalance `json:"rebalance,omitempty"` @@ -34,3 +34,13 @@ func NewIndexRouting() *IndexRouting { return r } + +// true + +type IndexRoutingVariant interface { + IndexRoutingCaster() *IndexRouting +} + +func (s *IndexRouting) IndexRoutingCaster() *IndexRouting { + return s +} diff --git a/typedapi/types/indexroutingallocation.go b/typedapi/types/indexroutingallocation.go index e2323d0050..889c8da0e8 100644 --- a/typedapi/types/indexroutingallocation.go +++ b/typedapi/types/indexroutingallocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // IndexRoutingAllocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L27-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L27-L32 type IndexRoutingAllocation struct { Disk *IndexRoutingAllocationDisk `json:"disk,omitempty"` Enable *indexroutingallocationoptions.IndexRoutingAllocationOptions `json:"enable,omitempty"` @@ -40,3 +40,13 @@ func NewIndexRoutingAllocation() *IndexRoutingAllocation { return r } + +// true + +type IndexRoutingAllocationVariant interface { + IndexRoutingAllocationCaster() *IndexRoutingAllocation +} + +func (s *IndexRoutingAllocation) IndexRoutingAllocationCaster() *IndexRoutingAllocation { + return s +} diff --git a/typedapi/types/indexroutingallocationdisk.go b/typedapi/types/indexroutingallocationdisk.go index ab0c71bdbc..7eb3529c52 100644 --- a/typedapi/types/indexroutingallocationdisk.go +++ b/typedapi/types/indexroutingallocationdisk.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexRoutingAllocationDisk type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L62-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L62-L64 type IndexRoutingAllocationDisk struct { ThresholdEnabled string `json:"threshold_enabled,omitempty"` } @@ -74,3 +74,13 @@ func NewIndexRoutingAllocationDisk() *IndexRoutingAllocationDisk { return r } + +// true + +type IndexRoutingAllocationDiskVariant interface { + IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk +} + +func (s *IndexRoutingAllocationDisk) IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk { + return s +} diff --git a/typedapi/types/indexroutingallocationinclude.go b/typedapi/types/indexroutingallocationinclude.go index ce89c1ea6a..8097d17b7b 100644 --- a/typedapi/types/indexroutingallocationinclude.go +++ b/typedapi/types/indexroutingallocationinclude.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexRoutingAllocationInclude type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L52-L55 type IndexRoutingAllocationInclude struct { Id_ *string `json:"_id,omitempty"` TierPreference_ *string `json:"_tier_preference,omitempty"` @@ -80,3 +80,13 @@ func NewIndexRoutingAllocationInclude() *IndexRoutingAllocationInclude { return r } + +// true + +type IndexRoutingAllocationIncludeVariant interface { + IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude +} + +func (s *IndexRoutingAllocationInclude) IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude { + return s +} diff --git a/typedapi/types/indexroutingallocationinitialrecovery.go b/typedapi/types/indexroutingallocationinitialrecovery.go index e9ba979f1e..cf51539d35 100644 --- a/typedapi/types/indexroutingallocationinitialrecovery.go +++ b/typedapi/types/indexroutingallocationinitialrecovery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexRoutingAllocationInitialRecovery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L57-L59 type IndexRoutingAllocationInitialRecovery struct { Id_ *string `json:"_id,omitempty"` } @@ -66,3 +66,13 @@ func NewIndexRoutingAllocationInitialRecovery() *IndexRoutingAllocationInitialRe return r } + +// true + +type IndexRoutingAllocationInitialRecoveryVariant interface { + IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery +} + +func (s *IndexRoutingAllocationInitialRecovery) IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery { + return s +} diff --git a/typedapi/types/indexroutingrebalance.go b/typedapi/types/indexroutingrebalance.go index 6b65223394..7150ecfee3 100644 --- a/typedapi/types/indexroutingrebalance.go +++ b/typedapi/types/indexroutingrebalance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // IndexRoutingRebalance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexRouting.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexRouting.ts#L34-L36 type IndexRoutingRebalance struct { Enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions `json:"enable"` } @@ -37,3 +37,13 @@ func NewIndexRoutingRebalance() *IndexRoutingRebalance { return r } + +// true + +type IndexRoutingRebalanceVariant interface { + IndexRoutingRebalanceCaster() *IndexRoutingRebalance +} + +func (s *IndexRoutingRebalance) IndexRoutingRebalanceCaster() *IndexRoutingRebalance { + return s +} diff --git a/typedapi/types/indexsegment.go b/typedapi/types/indexsegment.go index b262d33cbe..3f37ad287b 100644 --- a/typedapi/types/indexsegment.go +++ b/typedapi/types/indexsegment.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexSegment type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/segments/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/segments/types.ts#L24-L26 type IndexSegment struct { Shards map[string][]ShardsSegment `json:"shards"` } @@ -30,8 +30,10 @@ type IndexSegment struct { // NewIndexSegment returns a IndexSegment. 
func NewIndexSegment() *IndexSegment { r := &IndexSegment{ - Shards: make(map[string][]ShardsSegment, 0), + Shards: make(map[string][]ShardsSegment), } return r } + +// false diff --git a/typedapi/types/indexsegmentsort.go b/typedapi/types/indexsegmentsort.go index 27a4b6ad96..7fe2441e14 100644 --- a/typedapi/types/indexsegmentsort.go +++ b/typedapi/types/indexsegmentsort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // IndexSegmentSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSegmentSort.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSegmentSort.ts#L22-L27 type IndexSegmentSort struct { Field []string `json:"field,omitempty"` Missing []segmentsortmissing.SegmentSortMissing `json:"missing,omitempty"` @@ -132,3 +132,13 @@ func NewIndexSegmentSort() *IndexSegmentSort { return r } + +// true + +type IndexSegmentSortVariant interface { + IndexSegmentSortCaster() *IndexSegmentSort +} + +func (s *IndexSegmentSort) IndexSegmentSortCaster() *IndexSegmentSort { + return s +} diff --git a/typedapi/types/indexsettingblocks.go b/typedapi/types/indexsettingblocks.go index 68fc2156cf..2b257ef675 100644 --- a/typedapi/types/indexsettingblocks.go +++ b/typedapi/types/indexsettingblocks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexSettingBlocks type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L254-L260 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L262-L268 type IndexSettingBlocks struct { Metadata Stringifiedboolean `json:"metadata,omitempty"` Read Stringifiedboolean `json:"read,omitempty"` @@ -90,3 +90,13 @@ func NewIndexSettingBlocks() *IndexSettingBlocks { return r } + +// true + +type IndexSettingBlocksVariant interface { + IndexSettingBlocksCaster() *IndexSettingBlocks +} + +func (s *IndexSettingBlocks) IndexSettingBlocksCaster() *IndexSettingBlocks { + return s +} diff --git a/typedapi/types/indexsettings.go b/typedapi/types/indexsettings.go index fde283f625..1f3c3d9e3c 100644 --- a/typedapi/types/indexsettings.go +++ b/typedapi/types/indexsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,13 +33,13 @@ import ( // IndexSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L69-L169 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L70-L176 type IndexSettings struct { Analysis *IndexSettingsAnalysis `json:"analysis,omitempty"` // Analyze Settings to define analyzers, tokenizers, token filters and character // filters. Analyze *SettingsAnalyze `json:"analyze,omitempty"` - AutoExpandReplicas *string `json:"auto_expand_replicas,omitempty"` + AutoExpandReplicas any `json:"auto_expand_replicas,omitempty"` Blocks *IndexSettingBlocks `json:"blocks,omitempty"` CheckOnStartup *indexcheckonstartup.IndexCheckOnStartup `json:"check_on_startup,omitempty"` Codec *string `json:"codec,omitempty"` @@ -128,16 +128,9 @@ func (s *IndexSettings) UnmarshalJSON(data []byte) error { } case "auto_expand_replicas": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.AutoExpandReplicas); err != nil { return fmt.Errorf("%s | %w", "AutoExpandReplicas", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.AutoExpandReplicas = &o case "blocks": if err := dec.Decode(&s.Blocks); err != nil { @@ -572,55 +565,55 @@ func (s *IndexSettings) UnmarshalJSON(data []byte) error { case "BM25": oo := NewSettingsSimilarityBm25() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "boolean": oo := NewSettingsSimilarityBoolean() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "DFI": oo := NewSettingsSimilarityDfi() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "DFR": oo := 
NewSettingsSimilarityDfr() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "IB": oo := NewSettingsSimilarityIb() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "LMDirichlet": oo := NewSettingsSimilarityLmd() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "LMJelinekMercer": oo := NewSettingsSimilarityLmj() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "scripted": oo := NewSettingsSimilarityScripted() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo default: oo := new(SettingsSimilarity) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(SettingsSimilarity) | %w", err) } s.Similarity[key] = oo } @@ -739,9 +732,19 @@ func (s IndexSettings) MarshalJSON() ([]byte, error) { // NewIndexSettings returns a IndexSettings. func NewIndexSettings() *IndexSettings { r := &IndexSettings{ - IndexSettings: make(map[string]json.RawMessage, 0), - Similarity: make(map[string]SettingsSimilarity, 0), + IndexSettings: make(map[string]json.RawMessage), + Similarity: make(map[string]SettingsSimilarity), } return r } + +// true + +type IndexSettingsVariant interface { + IndexSettingsCaster() *IndexSettings +} + +func (s *IndexSettings) IndexSettingsCaster() *IndexSettings { + return s +} diff --git a/typedapi/types/indexsettingsanalysis.go b/typedapi/types/indexsettingsanalysis.go index 9a0517df41..5f2d00c583 100644 --- a/typedapi/types/indexsettingsanalysis.go +++ b/typedapi/types/indexsettingsanalysis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -24,12 +24,13 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" ) // IndexSettingsAnalysis type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L319-L325 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L333-L339 type IndexSettingsAnalysis struct { Analyzer map[string]Analyzer `json:"analyzer,omitempty"` CharFilter map[string]CharFilter `json:"char_filter,omitempty"` @@ -72,307 +73,301 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "custom": oo := NewCustomAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "fingerprint": oo := NewFingerprintAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "keyword": oo := NewKeywordAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err - } - s.Analyzer[key] = oo - case "language": - oo := NewLanguageAnalyzer() - if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "nori": oo := NewNoriAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "pattern": oo := NewPatternAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "simple": oo := NewSimpleAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | 
%w", err) } s.Analyzer[key] = oo case "standard": oo := NewStandardAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "stop": oo := NewStopAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "whitespace": oo := NewWhitespaceAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "icu_analyzer": oo := NewIcuAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "kuromoji": oo := NewKuromojiAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "snowball": oo := NewSnowballAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "arabic": oo := NewArabicAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "armenian": oo := NewArmenianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "basque": oo := NewBasqueAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "bengali": oo := NewBengaliAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "brazilian": oo := NewBrazilianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "bulgarian": oo := NewBulgarianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "catalan": oo := NewCatalanAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "chinese": oo := NewChineseAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "cjk": oo := NewCjkAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "czech": oo := NewCzechAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "danish": oo := NewDanishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "dutch": oo := NewDutchAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "english": oo := NewEnglishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "estonian": oo := NewEstonianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "finnish": oo := NewFinnishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "french": oo := NewFrenchAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "galician": oo := NewGalicianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "german": oo := NewGermanAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | 
%w", err) } s.Analyzer[key] = oo case "greek": oo := NewGreekAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "hindi": oo := NewHindiAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "hungarian": oo := NewHungarianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "indonesian": oo := NewIndonesianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "irish": oo := NewIrishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "italian": oo := NewItalianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "latvian": oo := NewLatvianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "lithuanian": oo := NewLithuanianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "norwegian": oo := NewNorwegianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "persian": oo := NewPersianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "portuguese": oo := NewPortugueseAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "romanian": oo := NewRomanianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "russian": oo := NewRussianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "serbian": oo := NewSerbianAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "sorani": oo := NewSoraniAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "spanish": oo := NewSpanishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "swedish": oo := NewSwedishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "turkish": oo := NewTurkishAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "thai": oo := NewThaiAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo default: oo := new(Analyzer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Analyzer) | %w", err) } s.Analyzer[key] = oo } @@ -395,37 +390,37 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "html_strip": oo := NewHtmlStripCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "mapping": oo := NewMappingCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "pattern_replace": oo := NewPatternReplaceCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case 
"icu_normalizer": oo := NewIcuNormalizationCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "kuromoji_iteration_mark": oo := NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo default: oo := new(CharFilter) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(CharFilter) | %w", err) } s.CharFilter[key] = oo } @@ -448,289 +443,289 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "asciifolding": oo := NewAsciiFoldingTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "common_grams": oo := NewCommonGramsTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "condition": oo := NewConditionTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "delimited_payload": oo := NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "edge_ngram": oo := NewEdgeNGramTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "elision": oo := NewElisionTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "fingerprint": oo := NewFingerprintTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "hunspell": oo := NewHunspellTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } 
s.Filter[key] = oo case "hyphenation_decompounder": oo := NewHyphenationDecompounderTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keep_types": oo := NewKeepTypesTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keep": oo := NewKeepWordsTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keyword_marker": oo := NewKeywordMarkerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kstem": oo := NewKStemTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "length": oo := NewLengthTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "limit": oo := NewLimitTokenCountTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "lowercase": oo := NewLowercaseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "multiplexer": oo := NewMultiplexerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "ngram": oo := NewNGramTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "nori_part_of_speech": oo := NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "pattern_capture": oo := NewPatternCaptureTokenFilter() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "pattern_replace": oo := NewPatternReplaceTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "porter_stem": oo := NewPorterStemTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "predicate_token_filter": oo := NewPredicateTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "remove_duplicates": oo := NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "reverse": oo := NewReverseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "shingle": oo := NewShingleTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "snowball": oo := NewSnowballTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stemmer_override": oo := NewStemmerOverrideTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stemmer": oo := NewStemmerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stop": oo := NewStopTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "synonym_graph": oo := NewSynonymGraphTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } 
s.Filter[key] = oo case "synonym": oo := NewSynonymTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "trim": oo := NewTrimTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "truncate": oo := NewTruncateTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "unique": oo := NewUniqueTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "uppercase": oo := NewUppercaseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "word_delimiter_graph": oo := NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "word_delimiter": oo := NewWordDelimiterTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_stemmer": oo := NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_readingform": oo := NewKuromojiReadingFormTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_part_of_speech": oo := NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_collation": oo := NewIcuCollationTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_folding": oo := 
NewIcuFoldingTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_normalizer": oo := NewIcuNormalizationTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_transform": oo := NewIcuTransformTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "phonetic": oo := NewPhoneticTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "dictionary_decompounder": oo := NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo default: oo := new(TokenFilter) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(TokenFilter) | %w", err) } s.Filter[key] = oo } @@ -755,19 +750,19 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "lowercase": oo := NewLowercaseNormalizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Normalizer | %w", err) } s.Normalizer[key] = oo case "custom": oo := NewCustomNormalizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Normalizer | %w", err) } s.Normalizer[key] = oo default: oo := new(Normalizer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Normalizer) | %w", err) } s.Normalizer[key] = oo } @@ -790,115 +785,115 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "char_group": oo := NewCharGroupTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "classic": oo := NewClassicTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "edge_ngram": oo := NewEdgeNGramTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "keyword": oo := NewKeywordTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "letter": oo := NewLetterTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "lowercase": oo := NewLowercaseTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "ngram": oo := NewNGramTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "path_hierarchy": oo := NewPathHierarchyTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "pattern": oo := NewPatternTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "simple_pattern": oo := NewSimplePatternTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "simple_pattern_split": oo := NewSimplePatternSplitTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "standard": oo := NewStandardTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "thai": oo := NewThaiTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "uax_url_email": oo 
:= NewUaxEmailUrlTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "whitespace": oo := NewWhitespaceTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "icu_tokenizer": oo := NewIcuTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "kuromoji_tokenizer": oo := NewKuromojiTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "nori_tokenizer": oo := NewNoriTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo default: oo := new(Tokenizer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Tokenizer) | %w", err) } s.Tokenizer[key] = oo } @@ -912,12 +907,22 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { // NewIndexSettingsAnalysis returns a IndexSettingsAnalysis. 
func NewIndexSettingsAnalysis() *IndexSettingsAnalysis { r := &IndexSettingsAnalysis{ - Analyzer: make(map[string]Analyzer, 0), - CharFilter: make(map[string]CharFilter, 0), - Filter: make(map[string]TokenFilter, 0), - Normalizer: make(map[string]Normalizer, 0), - Tokenizer: make(map[string]Tokenizer, 0), + Analyzer: make(map[string]Analyzer), + CharFilter: make(map[string]CharFilter), + Filter: make(map[string]TokenFilter), + Normalizer: make(map[string]Normalizer), + Tokenizer: make(map[string]Tokenizer), } return r } + +// true + +type IndexSettingsAnalysisVariant interface { + IndexSettingsAnalysisCaster() *IndexSettingsAnalysis +} + +func (s *IndexSettingsAnalysis) IndexSettingsAnalysisCaster() *IndexSettingsAnalysis { + return s +} diff --git a/typedapi/types/indexsettingslifecycle.go b/typedapi/types/indexsettingslifecycle.go index cda65a90fb..0d689e98f1 100644 --- a/typedapi/types/indexsettingslifecycle.go +++ b/typedapi/types/indexsettingslifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexSettingsLifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L276-L309 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L284-L323 type IndexSettingsLifecycle struct { // IndexingComplete Indicates whether or not the index has been rolled over. Automatically set to // true when ILM completes the rollover action. @@ -55,6 +55,10 @@ type IndexSettingsLifecycle struct { // for example logs-2016.10.31-000002). 
If the index name doesn’t match the // pattern, index creation fails. ParseOriginationDate *bool `json:"parse_origination_date,omitempty"` + // PreferIlm Preference for the system that manages a data stream backing index + // (preferring ILM when both ILM and DLM are + // applicable for an index). + PreferIlm string `json:"prefer_ilm,omitempty"` // RolloverAlias The index alias to update when the index rolls over. Specify when using a // policy that contains a rollover action. // When the index rolls over, the alias is updated to reflect that the index is @@ -118,6 +122,18 @@ func (s *IndexSettingsLifecycle) UnmarshalJSON(data []byte) error { s.ParseOriginationDate = &v } + case "prefer_ilm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PreferIlm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreferIlm = o + case "rollover_alias": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -146,3 +162,13 @@ func NewIndexSettingsLifecycle() *IndexSettingsLifecycle { return r } + +// true + +type IndexSettingsLifecycleVariant interface { + IndexSettingsLifecycleCaster() *IndexSettingsLifecycle +} + +func (s *IndexSettingsLifecycle) IndexSettingsLifecycleCaster() *IndexSettingsLifecycle { + return s +} diff --git a/typedapi/types/indexsettingslifecyclestep.go b/typedapi/types/indexsettingslifecyclestep.go index 1eaf983123..4b4c0c3d45 100644 --- a/typedapi/types/indexsettingslifecyclestep.go +++ b/typedapi/types/indexsettingslifecyclestep.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexSettingsLifecycleStep type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L311-L317 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L325-L331 type IndexSettingsLifecycleStep struct { // WaitTimeThreshold Time to wait for the cluster to resolve allocation issues during an ILM // shrink action. Must be greater than 1h (1 hour). @@ -69,3 +69,13 @@ func NewIndexSettingsLifecycleStep() *IndexSettingsLifecycleStep { return r } + +// true + +type IndexSettingsLifecycleStepVariant interface { + IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep +} + +func (s *IndexSettingsLifecycleStep) IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep { + return s +} diff --git a/typedapi/types/indexsettingstimeseries.go b/typedapi/types/indexsettingstimeseries.go index dcd210da4f..90c07c89fa 100644 --- a/typedapi/types/indexsettingstimeseries.go +++ b/typedapi/types/indexsettingstimeseries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexSettingsTimeSeries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L327-L330 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L341-L344 type IndexSettingsTimeSeries struct { EndTime DateTime `json:"end_time,omitempty"` StartTime DateTime `json:"start_time,omitempty"` @@ -72,3 +72,13 @@ func NewIndexSettingsTimeSeries() *IndexSettingsTimeSeries { return r } + +// true + +type IndexSettingsTimeSeriesVariant interface { + IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries +} + +func (s *IndexSettingsTimeSeries) IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries { + return s +} diff --git a/typedapi/types/indexstate.go b/typedapi/types/indexstate.go index 841bd2849d..8967884f0f 100644 --- a/typedapi/types/indexstate.go +++ b/typedapi/types/indexstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexState type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexState.ts#L27-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexState.ts#L27-L40 type IndexState struct { Aliases map[string]Alias `json:"aliases,omitempty"` DataStream *string `json:"data_stream,omitempty"` @@ -98,8 +98,18 @@ func (s *IndexState) UnmarshalJSON(data []byte) error { // NewIndexState returns a IndexState. 
func NewIndexState() *IndexState { r := &IndexState{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexStateVariant interface { + IndexStateCaster() *IndexState +} + +func (s *IndexState) IndexStateCaster() *IndexState { + return s +} diff --git a/typedapi/types/indexstats.go b/typedapi/types/indexstats.go index 9be9b7c203..2908ca169a 100644 --- a/typedapi/types/indexstats.go +++ b/typedapi/types/indexstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L52-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L52-L93 type IndexStats struct { Bulk *BulkStats `json:"bulk,omitempty"` // Completion Contains statistics about completions across all shards assigned to the node. @@ -72,3 +72,5 @@ func NewIndexStats() *IndexStats { return r } + +// false diff --git a/typedapi/types/indextemplate.go b/typedapi/types/indextemplate.go index dfe5aa8532..86706f13b7 100644 --- a/typedapi/types/indextemplate.go +++ b/typedapi/types/indextemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexTemplate.ts#L28-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexTemplate.ts#L28-L81 type IndexTemplate struct { AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` // ComposedOf An ordered list of component template names. @@ -43,6 +43,13 @@ type IndexTemplate struct { // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. DataStream *IndexTemplateDataStreamConfiguration `json:"data_stream,omitempty"` + // Deprecated Marks this index template as deprecated. + // When creating or updating a non-deprecated index template that uses + // deprecated components, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` + // IgnoreMissingComponentTemplates A list of component template names that are allowed to be absent. + IgnoreMissingComponentTemplates []string `json:"ignore_missing_component_templates,omitempty"` // IndexPatterns Name of the index template. IndexPatterns []string `json:"index_patterns"` // Meta_ Optional user metadata about the index template. May have any contents. 
@@ -103,6 +110,36 @@ func (s *IndexTemplate) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "DataStream", err) } + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "ignore_missing_component_templates": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + + s.IgnoreMissingComponentTemplates = append(s.IgnoreMissingComponentTemplates, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IgnoreMissingComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + } + case "index_patterns": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) @@ -160,3 +197,13 @@ func NewIndexTemplate() *IndexTemplate { return r } + +// true + +type IndexTemplateVariant interface { + IndexTemplateCaster() *IndexTemplate +} + +func (s *IndexTemplate) IndexTemplateCaster() *IndexTemplate { + return s +} diff --git a/typedapi/types/indextemplatedatastreamconfiguration.go b/typedapi/types/indextemplatedatastreamconfiguration.go index 4eb9a966b4..690aee19fa 100644 --- a/typedapi/types/indextemplatedatastreamconfiguration.go +++ b/typedapi/types/indextemplatedatastreamconfiguration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexTemplateDataStreamConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexTemplate.ts#L69-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexTemplate.ts#L83-L94 type IndexTemplateDataStreamConfiguration struct { // AllowCustomRouting If true, the data stream supports custom routing. AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` @@ -93,3 +93,13 @@ func NewIndexTemplateDataStreamConfiguration() *IndexTemplateDataStreamConfigura return r } + +// true + +type IndexTemplateDataStreamConfigurationVariant interface { + IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration +} + +func (s *IndexTemplateDataStreamConfiguration) IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration { + return s +} diff --git a/typedapi/types/indextemplateitem.go b/typedapi/types/indextemplateitem.go index 7a01cb9178..6f9c6adcce 100644 --- a/typedapi/types/indextemplateitem.go +++ b/typedapi/types/indextemplateitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IndexTemplateItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 type IndexTemplateItem struct { IndexTemplate IndexTemplate `json:"index_template"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewIndexTemplateItem() *IndexTemplateItem { return r } + +// false diff --git a/typedapi/types/indextemplatemapping.go b/typedapi/types/indextemplatemapping.go index 3ea681c734..4ee22cbcba 100644 --- a/typedapi/types/indextemplatemapping.go +++ b/typedapi/types/indextemplatemapping.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexTemplateMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L121-L143 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181 type IndexTemplateMapping struct { // Aliases Aliases to add. // If the index template includes a `data_stream` object, these are data stream @@ -43,8 +43,18 @@ type IndexTemplateMapping struct { // NewIndexTemplateMapping returns a IndexTemplateMapping. 
func NewIndexTemplateMapping() *IndexTemplateMapping { r := &IndexTemplateMapping{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexTemplateMappingVariant interface { + IndexTemplateMappingCaster() *IndexTemplateMapping +} + +func (s *IndexTemplateMapping) IndexTemplateMappingCaster() *IndexTemplateMapping { + return s +} diff --git a/typedapi/types/indextemplatesummary.go b/typedapi/types/indextemplatesummary.go index ac6b4623a9..a82045cf3b 100644 --- a/typedapi/types/indextemplatesummary.go +++ b/typedapi/types/indextemplatesummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndexTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexTemplate.ts#L82-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexTemplate.ts#L96-L118 type IndexTemplateSummary struct { // Aliases Aliases to add. // If the index template includes a `data_stream` object, these are data stream @@ -43,8 +43,18 @@ type IndexTemplateSummary struct { // NewIndexTemplateSummary returns a IndexTemplateSummary. 
func NewIndexTemplateSummary() *IndexTemplateSummary { r := &IndexTemplateSummary{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexTemplateSummaryVariant interface { + IndexTemplateSummaryCaster() *IndexTemplateSummary +} + +func (s *IndexTemplateSummary) IndexTemplateSummaryCaster() *IndexTemplateSummary { + return s +} diff --git a/typedapi/types/indexversioning.go b/typedapi/types/indexversioning.go index 5f86a803d4..073ce3f005 100644 --- a/typedapi/types/indexversioning.go +++ b/typedapi/types/indexversioning.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndexVersioning type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L271-L274 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L279-L282 type IndexVersioning struct { Created *string `json:"created,omitempty"` CreatedString *string `json:"created_string,omitempty"` @@ -80,3 +80,13 @@ func NewIndexVersioning() *IndexVersioning { return r } + +// true + +type IndexVersioningVariant interface { + IndexVersioningCaster() *IndexVersioning +} + +func (s *IndexVersioning) IndexVersioningCaster() *IndexVersioning { + return s +} diff --git a/typedapi/types/indicatornode.go b/typedapi/types/indicatornode.go index a5bf7728f3..1d076e6619 100644 --- a/typedapi/types/indicatornode.go +++ b/typedapi/types/indicatornode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicatorNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L91-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L92-L95 type IndicatorNode struct { Name *string `json:"name,omitempty"` NodeId *string `json:"node_id,omitempty"` @@ -87,3 +87,5 @@ func NewIndicatorNode() *IndicatorNode { return r } + +// false diff --git a/typedapi/types/indicators.go b/typedapi/types/indicators.go index 76bb8fd42f..0d9a507157 100644 --- a/typedapi/types/indicators.go +++ b/typedapi/types/indicators.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Indicators type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L32-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L32-L42 type Indicators struct { DataStreamLifecycle *DataStreamLifecycleIndicator `json:"data_stream_lifecycle,omitempty"` Disk *DiskIndicator `json:"disk,omitempty"` + FileSettings *FileSettingsIndicator `json:"file_settings,omitempty"` Ilm *IlmIndicator `json:"ilm,omitempty"` MasterIsStable *MasterIsStableIndicator `json:"master_is_stable,omitempty"` RepositoryIntegrity *RepositoryIntegrityIndicator `json:"repository_integrity,omitempty"` @@ -40,3 +41,5 @@ func NewIndicators() *Indicators { return r } + +// false diff --git a/typedapi/types/indices.go b/typedapi/types/indices.go index 0f4308f9c8..7723e7a285 100644 --- a/typedapi/types/indices.go +++ b/typedapi/types/indices.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Indices type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L61-L61 type Indices []string + +type IndicesVariant interface { + IndicesCaster() *Indices +} diff --git a/typedapi/types/indicesaction.go b/typedapi/types/indicesaction.go index 214c65918b..66d7ae263a 100644 --- a/typedapi/types/indicesaction.go +++ b/typedapi/types/indicesaction.go @@ -16,17 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // IndicesAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/types.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/types.ts#L23-L39 type IndicesAction struct { // Add Adds a data stream or index to an alias. // If the alias doesn’t exist, the `add` action creates it. - Add *AddAction `json:"add,omitempty"` + Add *AddAction `json:"add,omitempty"` + AdditionalIndicesActionProperty map[string]json.RawMessage `json:"-"` // Remove Removes a data stream or index from an alias. Remove *RemoveAction `json:"remove,omitempty"` // RemoveIndex Deletes an index. 
@@ -34,9 +40,50 @@ type IndicesAction struct { RemoveIndex *RemoveIndexAction `json:"remove_index,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesAction) MarshalJSON() ([]byte, error) { + type opt IndicesAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIndicesAction returns a IndicesAction. func NewIndicesAction() *IndicesAction { - r := &IndicesAction{} + r := &IndicesAction{ + AdditionalIndicesActionProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IndicesActionVariant interface { + IndicesActionCaster() *IndicesAction +} + +func (s *IndicesAction) IndicesActionCaster() *IndicesAction { + return s +} diff --git a/typedapi/types/indicesblockstatus.go b/typedapi/types/indicesblockstatus.go index 5095e35b69..5df1a24232 100644 --- a/typedapi/types/indicesblockstatus.go +++ b/typedapi/types/indicesblockstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicesBlockStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 type IndicesBlockStatus struct { Blocked bool `json:"blocked"` Name string `json:"name"` @@ -82,3 +82,5 @@ func NewIndicesBlockStatus() *IndicesBlockStatus { return r } + +// false diff --git a/typedapi/types/indicesindexingpressure.go b/typedapi/types/indicesindexingpressure.go index dd167db1bd..a160937ce9 100644 --- a/typedapi/types/indicesindexingpressure.go +++ b/typedapi/types/indicesindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndicesIndexingPressure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L550-L552 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L575-L577 type IndicesIndexingPressure struct { Memory IndicesIndexingPressureMemory `json:"memory"` } @@ -33,3 +33,13 @@ func NewIndicesIndexingPressure() *IndicesIndexingPressure { return r } + +// true + +type IndicesIndexingPressureVariant interface { + IndicesIndexingPressureCaster() *IndicesIndexingPressure +} + +func (s *IndicesIndexingPressure) IndicesIndexingPressureCaster() *IndicesIndexingPressure { + return s +} diff --git a/typedapi/types/indicesindexingpressurememory.go b/typedapi/types/indicesindexingpressurememory.go index 558bddad7f..eda9ddd455 100644 --- a/typedapi/types/indicesindexingpressurememory.go +++ b/typedapi/types/indicesindexingpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicesIndexingPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L554-L561 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L579-L586 type IndicesIndexingPressureMemory struct { // Limit Number of outstanding bytes that may be consumed by indexing requests. 
When // this limit is reached or exceeded, @@ -83,3 +83,13 @@ func NewIndicesIndexingPressureMemory() *IndicesIndexingPressureMemory { return r } + +// true + +type IndicesIndexingPressureMemoryVariant interface { + IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory +} + +func (s *IndicesIndexingPressureMemory) IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory { + return s +} diff --git a/typedapi/types/indicesmodifyaction.go b/typedapi/types/indicesmodifyaction.go index e97c9f4733..1491f767dc 100644 --- a/typedapi/types/indicesmodifyaction.go +++ b/typedapi/types/indicesmodifyaction.go @@ -16,29 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // IndicesModifyAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/modify_data_stream/types.ts#L22-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/modify_data_stream/types.ts#L22-L37 type IndicesModifyAction struct { // AddBackingIndex Adds an existing index as a backing index for a data stream. // The index is hidden as part of this operation. // WARNING: Adding indices with the `add_backing_index` action can potentially // result in improper data stream behavior. // This should be considered an expert level API. 
- AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + AdditionalIndicesModifyActionProperty map[string]json.RawMessage `json:"-"` // RemoveBackingIndex Removes a backing index from a data stream. // The index is unhidden as part of this operation. // A data stream’s write index cannot be removed. RemoveBackingIndex *IndexAndDataStreamAction `json:"remove_backing_index,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesModifyAction) MarshalJSON() ([]byte, error) { + type opt IndicesModifyAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesModifyActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesModifyActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIndicesModifyAction returns a IndicesModifyAction. func NewIndicesModifyAction() *IndicesModifyAction { - r := &IndicesModifyAction{} + r := &IndicesModifyAction{ + AdditionalIndicesModifyActionProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IndicesModifyActionVariant interface { + IndicesModifyActionCaster() *IndicesModifyAction +} + +func (s *IndicesModifyAction) IndicesModifyActionCaster() *IndicesModifyAction { + return s +} diff --git a/typedapi/types/indicesoptions.go b/typedapi/types/indicesoptions.go index 751a51189d..b06d75edb8 100644 --- a/typedapi/types/indicesoptions.go +++ b/typedapi/types/indicesoptions.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndicesOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L336-L363 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L332-L359 type IndicesOptions struct { // AllowNoIndices If false, the request returns an error if any wildcard expression, index // alias, or `_all` value targets only @@ -138,3 +138,13 @@ func NewIndicesOptions() *IndicesOptions { return r } + +// true + +type IndicesOptionsVariant interface { + IndicesOptionsCaster() *IndicesOptions +} + +func (s *IndicesOptions) IndicesOptionsCaster() *IndicesOptions { + return s +} diff --git a/typedapi/types/indicesprivileges.go b/typedapi/types/indicesprivileges.go index 6b7bbc35f5..5208140ece 100644 --- a/typedapi/types/indicesprivileges.go +++ b/typedapi/types/indicesprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndicesPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L198-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L216-L242 type IndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. Implicitly, restricted indices have limited @@ -91,8 +91,19 @@ func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error { } case "names": - if err := dec.Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } } case "privileges": @@ -118,7 +129,7 @@ func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", 
"terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -156,3 +167,13 @@ func NewIndicesPrivileges() *IndicesPrivileges { return r } + +// true + +type IndicesPrivilegesVariant interface { + IndicesPrivilegesCaster() *IndicesPrivileges +} + +func (s *IndicesPrivileges) IndicesPrivilegesCaster() *IndicesPrivileges { + return s +} diff --git a/typedapi/types/indicesprivilegesquery.go b/typedapi/types/indicesprivilegesquery.go index 347313ed8d..05935004ac 100644 --- a/typedapi/types/indicesprivilegesquery.go +++ b/typedapi/types/indicesprivilegesquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ package types // Query // RoleTemplateQuery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L279-L287 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L341-L349 type IndicesPrivilegesQuery any + +type IndicesPrivilegesQueryVariant interface { + IndicesPrivilegesQueryCaster() *IndicesPrivilegesQuery +} diff --git a/typedapi/types/indicesrecord.go b/typedapi/types/indicesrecord.go index 68a7f59cb5..234092ba64 100644 --- a/typedapi/types/indicesrecord.go +++ b/typedapi/types/indicesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicesRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/indices/types.ts#L20-L808 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/indices/types.ts#L20-L808 type IndicesRecord struct { // BulkAvgSizeInBytes average size in bytes of shard bulk BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` @@ -2051,3 +2051,5 @@ func NewIndicesRecord() *IndicesRecord { return r } + +// false diff --git a/typedapi/types/indicesshardsstats.go b/typedapi/types/indicesshardsstats.go index 26a1ab0f3a..29f87cef62 100644 --- a/typedapi/types/indicesshardsstats.go +++ b/typedapi/types/indicesshardsstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndicesShardsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55 type IndicesShardsStats struct { AllFields FieldSummary `json:"all_fields"` Fields map[string]FieldSummary `json:"fields"` @@ -31,8 +31,10 @@ type IndicesShardsStats struct { // NewIndicesShardsStats returns a IndicesShardsStats. 
func NewIndicesShardsStats() *IndicesShardsStats { r := &IndicesShardsStats{ - Fields: make(map[string]FieldSummary, 0), + Fields: make(map[string]FieldSummary), } return r } + +// false diff --git a/typedapi/types/indicesshardstats.go b/typedapi/types/indicesshardstats.go index 58feb1029c..353be634c4 100644 --- a/typedapi/types/indicesshardstats.go +++ b/typedapi/types/indicesshardstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // IndicesShardStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L192-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L192-L223 type IndicesShardStats struct { Bulk *BulkStats `json:"bulk,omitempty"` Commit *ShardCommit `json:"commit,omitempty"` @@ -59,8 +59,10 @@ type IndicesShardStats struct { // NewIndicesShardStats returns a IndicesShardStats. func NewIndicesShardStats() *IndicesShardStats { r := &IndicesShardStats{ - Shards: make(map[string]json.RawMessage, 0), + Shards: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/indicesshardstores.go b/typedapi/types/indicesshardstores.go index 365caf4632..5c93e4b6cc 100644 --- a/typedapi/types/indicesshardstores.go +++ b/typedapi/types/indicesshardstores.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IndicesShardStores type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L25-L27 type IndicesShardStores struct { Shards map[string]ShardStoreWrapper `json:"shards"` } @@ -30,8 +30,10 @@ type IndicesShardStores struct { // NewIndicesShardStores returns a IndicesShardStores. func NewIndicesShardStores() *IndicesShardStores { r := &IndicesShardStores{ - Shards: make(map[string]ShardStoreWrapper, 0), + Shards: make(map[string]ShardStoreWrapper), } return r } + +// false diff --git a/typedapi/types/indicesstats.go b/typedapi/types/indicesstats.go index eb97884f61..e80c94b293 100644 --- a/typedapi/types/indicesstats.go +++ b/typedapi/types/indicesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // IndicesStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L95-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L95-L110 type IndicesStats struct { Health *healthstatus.HealthStatus `json:"health,omitempty"` Primaries *IndexStats `json:"primaries,omitempty"` @@ -99,8 +99,10 @@ func (s *IndicesStats) UnmarshalJSON(data []byte) error { // NewIndicesStats returns a IndicesStats. func NewIndicesStats() *IndicesStats { r := &IndicesStats{ - Shards: make(map[string][]IndicesShardStats, 0), + Shards: make(map[string][]IndicesShardStats), } return r } + +// false diff --git a/typedapi/types/indicesvalidationexplanation.go b/typedapi/types/indicesvalidationexplanation.go index 4f3589b065..71b4b30dc1 100644 --- a/typedapi/types/indicesvalidationexplanation.go +++ b/typedapi/types/indicesvalidationexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicesValidationExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 type IndicesValidationExplanation struct { Error *string `json:"error,omitempty"` Explanation *string `json:"explanation,omitempty"` @@ -108,3 +108,5 @@ func NewIndicesValidationExplanation() *IndicesValidationExplanation { return r } + +// false diff --git a/typedapi/types/indicesversions.go b/typedapi/types/indicesversions.go index 1f597fc207..39ddca3e7e 100644 --- a/typedapi/types/indicesversions.go +++ b/typedapi/types/indicesversions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndicesVersions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L263-L268 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L263-L268 type IndicesVersions struct { IndexCount int `json:"index_count"` PrimaryShardCount int `json:"primary_shard_count"` @@ -117,3 +117,5 @@ func NewIndicesVersions() *IndicesVersions { return r } + +// false diff --git a/typedapi/types/indonesiananalyzer.go b/typedapi/types/indonesiananalyzer.go index e825ec3dbf..27c7cf4e78 100644 --- a/typedapi/types/indonesiananalyzer.go +++ b/typedapi/types/indonesiananalyzer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IndonesianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L202-L207 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L213-L218 type IndonesianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewIndonesianAnalyzer() *IndonesianAnalyzer { return r } + +// true + +type IndonesianAnalyzerVariant interface { + IndonesianAnalyzerCaster() *IndonesianAnalyzer +} + +func (s *IndonesianAnalyzer) IndonesianAnalyzerCaster() *IndonesianAnalyzer { + return s +} diff --git a/typedapi/types/inferenceaggregate.go b/typedapi/types/inferenceaggregate.go index ce8cbf62bc..8646569195 100644 --- a/typedapi/types/inferenceaggregate.go +++ b/typedapi/types/inferenceaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L755-L770 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L755-L770 type InferenceAggregate struct { Data map[string]json.RawMessage `json:"-"` FeatureImportance []InferenceFeatureImportance `json:"feature_importance,omitempty"` @@ -138,8 +138,10 @@ func (s InferenceAggregate) MarshalJSON() ([]byte, error) { // NewInferenceAggregate returns a InferenceAggregate. func NewInferenceAggregate() *InferenceAggregate { r := &InferenceAggregate{ - Data: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/inferenceaggregation.go b/typedapi/types/inferenceaggregation.go index 0f0fbc638a..9066f66dac 100644 --- a/typedapi/types/inferenceaggregation.go +++ b/typedapi/types/inferenceaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // InferenceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L225-L234 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L225-L234 type InferenceAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -107,3 +107,13 @@ func NewInferenceAggregation() *InferenceAggregation { return r } + +// true + +type InferenceAggregationVariant interface { + InferenceAggregationCaster() *InferenceAggregation +} + +func (s *InferenceAggregation) InferenceAggregationCaster() *InferenceAggregation { + return s +} diff --git a/typedapi/types/inferencechunkingsettings.go b/typedapi/types/inferencechunkingsettings.go new file mode 100644 index 0000000000..5c7276ce6b --- /dev/null +++ b/typedapi/types/inferencechunkingsettings.go @@ -0,0 +1,182 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceChunkingSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Services.ts#L60-L89 +type InferenceChunkingSettings struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // MaxChunkSize The maximum size of a chunk in words. + // This value cannot be higher than `300` or lower than `20` (for `sentence` + // strategy) or `10` (for `word` strategy). + MaxChunkSize *int `json:"max_chunk_size,omitempty"` + // Overlap The number of overlapping words for chunks. + // It is applicable only to a `word` chunking strategy. + // This value cannot be higher than half the `max_chunk_size` value. + Overlap *int `json:"overlap,omitempty"` + // SentenceOverlap The number of overlapping sentences for chunks. + // It is applicable only for a `sentence` chunking strategy. + // It can be either `1` or `0`. + SentenceOverlap *int `json:"sentence_overlap,omitempty"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // Strategy The chunking strategy: `sentence` or `word`. 
+ Strategy *string `json:"strategy,omitempty"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +func (s *InferenceChunkingSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + + case "max_chunk_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChunkSize", err) + } + s.MaxChunkSize = &value + case float64: + f := int(v) + s.MaxChunkSize = &f + } + + case "overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overlap", err) + } + s.Overlap = &value + case float64: + f := int(v) + s.Overlap = &f + } + + case "sentence_overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SentenceOverlap", err) + } + s.SentenceOverlap = &value + case float64: + f := int(v) + s.SentenceOverlap = &f + } + + case "service": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Service", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Service = o + + case "service_settings": + if err := dec.Decode(&s.ServiceSettings); err != nil { + return fmt.Errorf("%s | %w", "ServiceSettings", err) + } + + case "strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Strategy = &o + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} + +// NewInferenceChunkingSettings returns a InferenceChunkingSettings. +func NewInferenceChunkingSettings() *InferenceChunkingSettings { + r := &InferenceChunkingSettings{} + + return r +} + +// true + +type InferenceChunkingSettingsVariant interface { + InferenceChunkingSettingsCaster() *InferenceChunkingSettings +} + +func (s *InferenceChunkingSettings) InferenceChunkingSettingsCaster() *InferenceChunkingSettings { + return s +} diff --git a/typedapi/types/inferenceclassimportance.go b/typedapi/types/inferenceclassimportance.go index e9dce2afb5..8cc6d21508 100644 --- a/typedapi/types/inferenceclassimportance.go +++ b/typedapi/types/inferenceclassimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L784-L787 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L784-L787 type InferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` @@ -91,3 +91,5 @@ func NewInferenceClassImportance() *InferenceClassImportance { return r } + +// false diff --git a/typedapi/types/inferenceconfig.go b/typedapi/types/inferenceconfig.go index 29b160033f..f6eb4c754d 100644 --- a/typedapi/types/inferenceconfig.go +++ b/typedapi/types/inferenceconfig.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1008-L1020 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1049-L1061 type InferenceConfig struct { + AdditionalInferenceConfigProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *InferenceConfigClassification `json:"classification,omitempty"` // Regression Regression configuration for inference. 
Regression *InferenceConfigRegression `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfig) MarshalJSON() ([]byte, error) { + type opt InferenceConfig + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfig returns a InferenceConfig. func NewInferenceConfig() *InferenceConfig { - r := &InferenceConfig{} + r := &InferenceConfig{ + AdditionalInferenceConfigProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigVariant interface { + InferenceConfigCaster() *InferenceConfig +} + +func (s *InferenceConfig) InferenceConfigCaster() *InferenceConfig { + return s +} diff --git a/typedapi/types/inferenceconfigclassification.go b/typedapi/types/inferenceconfigclassification.go index 1f160daed6..63ea0b3cc7 100644 --- a/typedapi/types/inferenceconfigclassification.go +++ b/typedapi/types/inferenceconfigclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceConfigClassification type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1035-L1061 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1076-L1102 type InferenceConfigClassification struct { // NumTopClasses Specifies the number of top class predictions to return. NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -127,3 +127,13 @@ func NewInferenceConfigClassification() *InferenceConfigClassification { return r } + +// true + +type InferenceConfigClassificationVariant interface { + InferenceConfigClassificationCaster() *InferenceConfigClassification +} + +func (s *InferenceConfigClassification) InferenceConfigClassificationCaster() *InferenceConfigClassification { + return s +} diff --git a/typedapi/types/inferenceconfigcontainer.go b/typedapi/types/inferenceconfigcontainer.go index a4604fc7e2..a91dea4872 100644 --- a/typedapi/types/inferenceconfigcontainer.go +++ b/typedapi/types/inferenceconfigcontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L236-L242 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L236-L242 type InferenceConfigContainer struct { + AdditionalInferenceConfigContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // Regression Regression configuration for inference. Regression *RegressionInferenceOptions `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigContainer returns a InferenceConfigContainer. 
func NewInferenceConfigContainer() *InferenceConfigContainer { - r := &InferenceConfigContainer{} + r := &InferenceConfigContainer{ + AdditionalInferenceConfigContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigContainerVariant interface { + InferenceConfigContainerCaster() *InferenceConfigContainer +} + +func (s *InferenceConfigContainer) InferenceConfigContainerCaster() *InferenceConfigContainer { + return s +} diff --git a/typedapi/types/inferenceconfigcreatecontainer.go b/typedapi/types/inferenceconfigcreatecontainer.go index 61af8f52b6..e5942da1bd 100644 --- a/typedapi/types/inferenceconfigcreatecontainer.go +++ b/typedapi/types/inferenceconfigcreatecontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigCreateContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L23-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L23-L80 type InferenceConfigCreateContainer struct { + AdditionalInferenceConfigCreateContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // FillMask Fill mask configuration for inference. 
@@ -46,9 +52,50 @@ type InferenceConfigCreateContainer struct { ZeroShotClassification *ZeroShotClassificationInferenceOptions `json:"zero_shot_classification,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigCreateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigCreateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigCreateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigCreateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigCreateContainer returns a InferenceConfigCreateContainer. func NewInferenceConfigCreateContainer() *InferenceConfigCreateContainer { - r := &InferenceConfigCreateContainer{} + r := &InferenceConfigCreateContainer{ + AdditionalInferenceConfigCreateContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigCreateContainerVariant interface { + InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer +} + +func (s *InferenceConfigCreateContainer) InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer { + return s +} diff --git a/typedapi/types/inferenceconfigregression.go b/typedapi/types/inferenceconfigregression.go index e5305d6784..5c8ea66515 100644 --- a/typedapi/types/inferenceconfigregression.go +++ b/typedapi/types/inferenceconfigregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceConfigRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1022-L1033 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1063-L1074 type InferenceConfigRegression struct { // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` @@ -87,3 +87,13 @@ func NewInferenceConfigRegression() *InferenceConfigRegression { return r } + +// true + +type InferenceConfigRegressionVariant interface { + InferenceConfigRegressionCaster() *InferenceConfigRegression +} + +func (s *InferenceConfigRegression) InferenceConfigRegressionCaster() *InferenceConfigRegression { + return s +} diff --git a/typedapi/types/inferenceconfigupdatecontainer.go b/typedapi/types/inferenceconfigupdatecontainer.go index 043e323946..4523f4fc04 100644 --- a/typedapi/types/inferenceconfigupdatecontainer.go +++ b/typedapi/types/inferenceconfigupdatecontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigUpdateContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L296-L318 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L284-L306 type InferenceConfigUpdateContainer struct { + AdditionalInferenceConfigUpdateContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // FillMask Fill mask configuration for inference. @@ -46,9 +52,50 @@ type InferenceConfigUpdateContainer struct { ZeroShotClassification *ZeroShotClassificationInferenceUpdateOptions `json:"zero_shot_classification,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigUpdateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigUpdateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigUpdateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigUpdateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigUpdateContainer returns a InferenceConfigUpdateContainer. 
func NewInferenceConfigUpdateContainer() *InferenceConfigUpdateContainer { - r := &InferenceConfigUpdateContainer{} + r := &InferenceConfigUpdateContainer{ + AdditionalInferenceConfigUpdateContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigUpdateContainerVariant interface { + InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer +} + +func (s *InferenceConfigUpdateContainer) InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer { + return s +} diff --git a/typedapi/types/inferenceendpoint.go b/typedapi/types/inferenceendpoint.go index 58af789547..1ff7c36158 100644 --- a/typedapi/types/inferenceendpoint.go +++ b/typedapi/types/inferenceendpoint.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,10 @@ import ( // InferenceEndpoint type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Services.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Services.ts#L24-L44 type InferenceEndpoint struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` // Service The service type Service string `json:"service"` // ServiceSettings Settings specific to the service @@ -56,6 +58,11 @@ func (s *InferenceEndpoint) UnmarshalJSON(data []byte) error { switch t { + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + case "service": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -89,3 +96,13 @@ func NewInferenceEndpoint() *InferenceEndpoint { return r } + +// true + +type InferenceEndpointVariant interface { + InferenceEndpointCaster() *InferenceEndpoint +} + +func (s *InferenceEndpoint) InferenceEndpointCaster() *InferenceEndpoint { + return s +} diff --git a/typedapi/types/inferenceendpointinfo.go b/typedapi/types/inferenceendpointinfo.go index e288c7991e..58dc034634 100644 --- a/typedapi/types/inferenceendpointinfo.go +++ b/typedapi/types/inferenceendpointinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,8 +33,10 @@ import ( // InferenceEndpointInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Services.ts#L41-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Services.ts#L46-L58 type InferenceEndpointInfo struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` // InferenceId The inference Id InferenceId string `json:"inference_id"` // Service The service type @@ -62,6 +64,11 @@ func (s *InferenceEndpointInfo) UnmarshalJSON(data []byte) error { switch t { + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + case "inference_id": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -112,3 +119,5 @@ func NewInferenceEndpointInfo() *InferenceEndpointInfo { return r } + +// false diff --git a/typedapi/types/inferencefeatureimportance.go b/typedapi/types/inferencefeatureimportance.go index 9e36dffe7f..e91e755067 100644 --- a/typedapi/types/inferencefeatureimportance.go +++ b/typedapi/types/inferencefeatureimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceFeatureImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L778-L782 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L778-L782 type InferenceFeatureImportance struct { Classes []InferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` @@ -97,3 +97,5 @@ func NewInferenceFeatureImportance() *InferenceFeatureImportance { return r } + +// false diff --git a/typedapi/types/inferenceprocessor.go b/typedapi/types/inferenceprocessor.go index 34084e22d0..3799dad652 100644 --- a/typedapi/types/inferenceprocessor.go +++ b/typedapi/types/inferenceprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L987-L1006 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1028-L1047 type InferenceProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -41,7 +41,7 @@ type InferenceProcessor struct { // configuration. FieldMap map[string]json.RawMessage `json:"field_map,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // InferenceConfig Contains the inference type and its options. @@ -93,16 +93,9 @@ func (s *InferenceProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -158,8 +151,18 @@ func (s *InferenceProcessor) UnmarshalJSON(data []byte) error { // NewInferenceProcessor returns a InferenceProcessor. func NewInferenceProcessor() *InferenceProcessor { r := &InferenceProcessor{ - FieldMap: make(map[string]json.RawMessage, 0), + FieldMap: make(map[string]json.RawMessage), } return r } + +// true + +type InferenceProcessorVariant interface { + InferenceProcessorCaster() *InferenceProcessor +} + +func (s *InferenceProcessor) InferenceProcessorCaster() *InferenceProcessor { + return s +} diff --git a/typedapi/types/inferenceresponseresult.go b/typedapi/types/inferenceresponseresult.go index 2356ada3ef..a243bcc04e 100644 --- a/typedapi/types/inferenceresponseresult.go +++ b/typedapi/types/inferenceresponseresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceResponseResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L459-L507 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L447-L495 type InferenceResponseResult struct { // Entities If the model is trained for named entity recognition (NER) tasks, the // response contains the recognized entities. @@ -200,3 +200,5 @@ func NewInferenceResponseResult() *InferenceResponseResult { return r } + +// false diff --git a/typedapi/types/inferencetopclassentry.go b/typedapi/types/inferencetopclassentry.go index 3587f9cd87..9823d6d1b2 100644 --- a/typedapi/types/inferencetopclassentry.go +++ b/typedapi/types/inferencetopclassentry.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InferenceTopClassEntry type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L772-L776 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L772-L776 type InferenceTopClassEntry struct { ClassName FieldValue `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -101,3 +101,5 @@ func NewInferenceTopClassEntry() *InferenceTopClassEntry { return r } + +// false diff --git a/typedapi/types/influence.go b/typedapi/types/influence.go index 0d2453485e..e396d76825 100644 --- a/typedapi/types/influence.go +++ b/typedapi/types/influence.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Influence type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Anomaly.ts#L140-L143 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Anomaly.ts#L141-L144 type Influence struct { InfluencerFieldName string `json:"influencer_field_name"` InfluencerFieldValues []string `json:"influencer_field_values"` @@ -80,3 +80,5 @@ func NewInfluence() *Influence { return r } + +// false diff --git a/typedapi/types/influencer.go b/typedapi/types/influencer.go index 52eda7d451..a532f16a2a 100644 --- a/typedapi/types/influencer.go +++ b/typedapi/types/influencer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Influencer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Influencer.ts#L24-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Influencer.ts#L24-L76 type Influencer struct { // BucketSpan The length of the bucket in seconds. This value matches the bucket span that // is specified in the job. 
@@ -218,3 +218,5 @@ func NewInfluencer() *Influencer { return r } + +// false diff --git a/typedapi/types/infofeaturestate.go b/typedapi/types/infofeaturestate.go index 46bee52be7..622c997231 100644 --- a/typedapi/types/infofeaturestate.go +++ b/typedapi/types/infofeaturestate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InfoFeatureState type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 type InfoFeatureState struct { FeatureName string `json:"feature_name"` Indices []string `json:"indices"` @@ -91,3 +91,5 @@ func NewInfoFeatureState() *InfoFeatureState { return r } + +// false diff --git a/typedapi/types/simulateingest.go b/typedapi/types/ingest.go similarity index 77% rename from typedapi/types/simulateingest.go rename to typedapi/types/ingest.go index 1a71e78238..b2f32a47ce 100644 --- a/typedapi/types/simulateingest.go +++ b/typedapi/types/ingest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,16 +28,16 @@ import ( "io" ) -// SimulateIngest type. +// Ingest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L29-L37 -type SimulateIngest struct { +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L29-L37 +type Ingest struct { Pipeline *string `json:"pipeline,omitempty"` Redact_ *Redact `json:"_redact,omitempty"` Timestamp DateTime `json:"timestamp"` } -func (s *SimulateIngest) UnmarshalJSON(data []byte) error { +func (s *Ingest) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -72,9 +72,11 @@ func (s *SimulateIngest) UnmarshalJSON(data []byte) error { return nil } -// NewSimulateIngest returns a SimulateIngest. -func NewSimulateIngest() *SimulateIngest { - r := &SimulateIngest{} +// NewIngest returns a Ingest. +func NewIngest() *Ingest { + r := &Ingest{} return r } + +// false diff --git a/typedapi/types/ingestdocumentsimulation.go b/typedapi/types/ingestdocumentsimulation.go new file mode 100644 index 0000000000..c85dbbad40 --- /dev/null +++ b/typedapi/types/ingestdocumentsimulation.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IngestDocumentSimulation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/simulate/ingest/SimulateIngestResponse.ts#L35-L78 +type IngestDocumentSimulation struct { + // Error Any error resulting from simulatng ingest on this doc. This can be an error + // generated by + // executing a processor, or a mapping validation error when simulating indexing + // the resulting + // doc. + Error *ErrorCause `json:"error,omitempty"` + // ExecutedPipelines A list of the names of the pipelines executed on this document. + ExecutedPipelines []string `json:"executed_pipelines"` + // Id_ Identifier for the document. + Id_ string `json:"_id"` + // IgnoredFields A list of the fields that would be ignored at the indexing step. For example, + // a field whose + // value is larger than the allowed limit would make it through all of the + // pipelines, but + // would not be indexed into Elasticsearch. + IgnoredFields []map[string]string `json:"ignored_fields,omitempty"` + // Index_ Name of the index that the document would be indexed into if this were not a + // simulation. + Index_ string `json:"_index"` + IngestDocumentSimulation map[string]string `json:"-"` + // Source_ JSON body for the document. 
+ Source_ map[string]json.RawMessage `json:"_source"` + Version_ StringifiedVersionNumber `json:"_version"` +} + +func (s *IngestDocumentSimulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "executed_pipelines": + if err := dec.Decode(&s.ExecutedPipelines); err != nil { + return fmt.Errorf("%s | %w", "ExecutedPipelines", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "ignored_fields": + if err := dec.Decode(&s.IgnoredFields); err != nil { + return fmt.Errorf("%s | %w", "IgnoredFields", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_source": + if s.Source_ == nil { + s.Source_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + default: + + if key, ok := t.(string); ok { + if s.IngestDocumentSimulation == nil { + s.IngestDocumentSimulation = make(map[string]string, 0) + } + raw := new(string) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "IngestDocumentSimulation", err) + } + s.IngestDocumentSimulation[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IngestDocumentSimulation) MarshalJSON() ([]byte, error) { + type opt IngestDocumentSimulation + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) 
+ if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.IngestDocumentSimulation { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "IngestDocumentSimulation") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIngestDocumentSimulation returns a IngestDocumentSimulation. +func NewIngestDocumentSimulation() *IngestDocumentSimulation { + r := &IngestDocumentSimulation{ + IngestDocumentSimulation: make(map[string]string), + Source_: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/ingestpipeline.go b/typedapi/types/ingestpipeline.go index d8f379404f..3515d9bd86 100644 --- a/typedapi/types/ingestpipeline.go +++ b/typedapi/types/ingestpipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IngestPipeline type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Pipeline.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Pipeline.ts#L23-L51 type IngestPipeline struct { // Deprecated Marks this ingest pipeline as deprecated. 
// When a deprecated ingest pipeline is referenced as the default or final @@ -124,3 +124,13 @@ func NewIngestPipeline() *IngestPipeline { return r } + +// true + +type IngestPipelineVariant interface { + IngestPipelineCaster() *IngestPipeline +} + +func (s *IngestPipeline) IngestPipelineCaster() *IngestPipeline { + return s +} diff --git a/typedapi/types/ingestpipelineparams.go b/typedapi/types/ingestpipelineparams.go index d931dcebeb..e3a554034e 100644 --- a/typedapi/types/ingestpipelineparams.go +++ b/typedapi/types/ingestpipelineparams.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IngestPipelineParams type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L148-L153 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L148-L153 type IngestPipelineParams struct { ExtractBinaryContent bool `json:"extract_binary_content"` Name string `json:"name"` @@ -119,3 +119,13 @@ func NewIngestPipelineParams() *IngestPipelineParams { return r } + +// true + +type IngestPipelineParamsVariant interface { + IngestPipelineParamsCaster() *IngestPipelineParams +} + +func (s *IngestPipelineParams) IngestPipelineParamsCaster() *IngestPipelineParams { + return s +} diff --git a/typedapi/types/ingeststats.go b/typedapi/types/ingeststats.go index e4d7dc07d5..c6e7d3b79c 100644 --- a/typedapi/types/ingeststats.go +++ b/typedapi/types/ingeststats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IngestStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L356-L394 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L356-L394 type IngestStats struct { // Count Total number of documents ingested during the lifetime of this node. Count int64 `json:"count"` @@ -173,3 +173,5 @@ func NewIngestStats() *IngestStats { return r } + +// false diff --git a/typedapi/types/ingesttotal.go b/typedapi/types/ingesttotal.go index 2de9571816..9a9130d93c 100644 --- a/typedapi/types/ingesttotal.go +++ b/typedapi/types/ingesttotal.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IngestTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L396-L413 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L396-L413 type IngestTotal struct { // Count Total number of documents ingested during the lifetime of this node. 
Count int64 `json:"count"` @@ -120,3 +120,5 @@ func NewIngestTotal() *IngestTotal { return r } + +// false diff --git a/typedapi/types/inlineget.go b/typedapi/types/inlineget.go index 06aff1d85e..7748ce6295 100644 --- a/typedapi/types/inlineget.go +++ b/typedapi/types/inlineget.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InlineGet type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L321-L334 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L317-L330 type InlineGet struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -159,9 +159,11 @@ func (s InlineGet) MarshalJSON() ([]byte, error) { // NewInlineGet returns a InlineGet. func NewInlineGet() *InlineGet { r := &InlineGet{ - Fields: make(map[string]json.RawMessage, 0), - Metadata: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/inlinegetdictuserdefined.go b/typedapi/types/inlinegetdictuserdefined.go index 58543cbb74..d8e2c11600 100644 --- a/typedapi/types/inlinegetdictuserdefined.go +++ b/typedapi/types/inlinegetdictuserdefined.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InlineGetDictUserDefined type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L321-L334 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L317-L330 type InlineGetDictUserDefined struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -162,10 +162,12 @@ func (s InlineGetDictUserDefined) MarshalJSON() ([]byte, error) { // NewInlineGetDictUserDefined returns a InlineGetDictUserDefined. func NewInlineGetDictUserDefined() *InlineGetDictUserDefined { r := &InlineGetDictUserDefined{ - Fields: make(map[string]json.RawMessage, 0), - InlineGetDictUserDefined: make(map[string]json.RawMessage, 0), - Source_: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), + InlineGetDictUserDefined: make(map[string]json.RawMessage), + Source_: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/innerhits.go b/typedapi/types/innerhits.go index 720edfd1b3..7a2a75794e 100644 --- a/typedapi/types/innerhits.go +++ b/typedapi/types/innerhits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InnerHits type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L108-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L107-L141 type InnerHits struct { Collapse *FieldCollapse `json:"collapse,omitempty"` DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` @@ -294,8 +294,18 @@ func (s *InnerHits) UnmarshalJSON(data []byte) error { // NewInnerHits returns a InnerHits. func NewInnerHits() *InnerHits { r := &InnerHits{ - ScriptFields: make(map[string]ScriptField, 0), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type InnerHitsVariant interface { + InnerHitsCaster() *InnerHits +} + +func (s *InnerHits) InnerHitsCaster() *InnerHits { + return s +} diff --git a/typedapi/types/innerhitsresult.go b/typedapi/types/innerhitsresult.go index 0b46308c04..4e63695b36 100644 --- a/typedapi/types/innerhitsresult.go +++ b/typedapi/types/innerhitsresult.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // InnerHitsResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L86-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L85-L87 type InnerHitsResult struct { - Hits *HitsMetadata `json:"hits,omitempty"` + Hits HitsMetadata `json:"hits"` } // NewInnerHitsResult returns a InnerHitsResult. 
@@ -33,3 +33,5 @@ func NewInnerHitsResult() *InnerHitsResult { return r } + +// false diff --git a/typedapi/types/inprogress.go b/typedapi/types/inprogress.go index f801d6ca5f..700baef2d4 100644 --- a/typedapi/types/inprogress.go +++ b/typedapi/types/inprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // InProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L131-L136 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L141-L146 type InProgress struct { Name string `json:"name"` StartTimeMillis int64 `json:"start_time_millis"` @@ -92,3 +92,5 @@ func NewInProgress() *InProgress { return r } + +// false diff --git a/typedapi/types/input.go b/typedapi/types/input.go index 7ed7fe1875..f70472257e 100644 --- a/typedapi/types/input.go +++ b/typedapi/types/input.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Input type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L56-L58 type Input struct { FieldNames []string `json:"field_names"` } @@ -77,3 +77,13 @@ func NewInput() *Input { return r } + +// true + +type InputVariant interface { + InputCaster() *Input +} + +func (s *Input) InputCaster() *Input { + return s +} diff --git a/typedapi/types/integernumberproperty.go b/typedapi/types/integernumberproperty.go index b70fcad347..6c2c75c476 100644 --- a/typedapi/types/integernumberproperty.go +++ b/typedapi/types/integernumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // IntegerNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L161-L164 type IntegerNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type IntegerNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *int `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -553,301 +567,313 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -872,6 +898,11 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -920,6 +951,7 @@ func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -933,10 +965,20 @@ func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { // NewIntegerNumberProperty returns a IntegerNumberProperty. func NewIntegerNumberProperty() *IntegerNumberProperty { r := &IntegerNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IntegerNumberPropertyVariant interface { + IntegerNumberPropertyCaster() *IntegerNumberProperty +} + +func (s *IntegerNumberProperty) IntegerNumberPropertyCaster() *IntegerNumberProperty { + return s +} diff --git a/typedapi/types/integerrangeproperty.go b/typedapi/types/integerrangeproperty.go index 5f7882ad7f..d06e15a41c 100644 --- a/typedapi/types/integerrangeproperty.go +++ b/typedapi/types/integerrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IntegerRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L42-L44 type IntegerRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,10 +45,11 @@ type IntegerRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { @@ -149,301 +151,313 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -506,301 +520,313 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -820,6 +846,11 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -834,18 +865,19 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { func (s IntegerRangeProperty) MarshalJSON() ([]byte, error) { type innerIntegerRangeProperty IntegerRangeProperty tmp := innerIntegerRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "integer_range" @@ -856,10 +888,20 @@ func (s IntegerRangeProperty) MarshalJSON() ([]byte, error) { // NewIntegerRangeProperty returns a IntegerRangeProperty. 
func NewIntegerRangeProperty() *IntegerRangeProperty { r := &IntegerRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IntegerRangePropertyVariant interface { + IntegerRangePropertyCaster() *IntegerRangeProperty +} + +func (s *IntegerRangeProperty) IntegerRangePropertyCaster() *IntegerRangeProperty { + return s +} diff --git a/typedapi/types/intervals.go b/typedapi/types/intervals.go index 28e33bbefd..1cfa29e7f9 100644 --- a/typedapi/types/intervals.go +++ b/typedapi/types/intervals.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // Intervals type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L83-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L83-L110 type Intervals struct { + AdditionalIntervalsProperty map[string]json.RawMessage `json:"-"` // AllOf Returns matches that span a combination of other rules. AllOf *IntervalsAllOf `json:"all_of,omitempty"` // AnyOf Returns intervals produced by any of its sub-rules. 
@@ -38,9 +44,50 @@ type Intervals struct { Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Intervals) MarshalJSON() ([]byte, error) { + type opt Intervals + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervals returns a Intervals. func NewIntervals() *Intervals { - r := &Intervals{} + r := &Intervals{ + AdditionalIntervalsProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsVariant interface { + IntervalsCaster() *Intervals +} + +func (s *Intervals) IntervalsCaster() *Intervals { + return s +} diff --git a/typedapi/types/intervalsallof.go b/typedapi/types/intervalsallof.go index 70265151f5..5a08a5b67e 100644 --- a/typedapi/types/intervalsallof.go +++ b/typedapi/types/intervalsallof.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IntervalsAllOf type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L50-L70 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L50-L70 type IntervalsAllOf struct { // Filter Rule used to filter returned intervals. Filter *IntervalsFilter `json:"filter,omitempty"` @@ -113,3 +113,13 @@ func NewIntervalsAllOf() *IntervalsAllOf { return r } + +// true + +type IntervalsAllOfVariant interface { + IntervalsAllOfCaster() *IntervalsAllOf +} + +func (s *IntervalsAllOf) IntervalsAllOfCaster() *IntervalsAllOf { + return s +} diff --git a/typedapi/types/intervalsanyof.go b/typedapi/types/intervalsanyof.go index d8d2043aef..2eb0441677 100644 --- a/typedapi/types/intervalsanyof.go +++ b/typedapi/types/intervalsanyof.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IntervalsAnyOf type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L72-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L72-L81 type IntervalsAnyOf struct { // Filter Rule used to filter returned intervals. 
Filter *IntervalsFilter `json:"filter,omitempty"` @@ -36,3 +36,13 @@ func NewIntervalsAnyOf() *IntervalsAnyOf { return r } + +// true + +type IntervalsAnyOfVariant interface { + IntervalsAnyOfCaster() *IntervalsAnyOf +} + +func (s *IntervalsAnyOf) IntervalsAnyOfCaster() *IntervalsAnyOf { + return s +} diff --git a/typedapi/types/intervalsfilter.go b/typedapi/types/intervalsfilter.go index e56db98eed..19a61448a4 100644 --- a/typedapi/types/intervalsfilter.go +++ b/typedapi/types/intervalsfilter.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // IntervalsFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L112-L152 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L112-L152 type IntervalsFilter struct { + AdditionalIntervalsFilterProperty map[string]json.RawMessage `json:"-"` // After Query used to return intervals that follow an interval from the `filter` // rule. 
After *Intervals `json:"after,omitempty"` @@ -53,9 +59,50 @@ type IntervalsFilter struct { Script *Script `json:"script,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IntervalsFilter) MarshalJSON() ([]byte, error) { + type opt IntervalsFilter + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsFilterProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsFilterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervalsFilter returns a IntervalsFilter. func NewIntervalsFilter() *IntervalsFilter { - r := &IntervalsFilter{} + r := &IntervalsFilter{ + AdditionalIntervalsFilterProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsFilterVariant interface { + IntervalsFilterCaster() *IntervalsFilter +} + +func (s *IntervalsFilter) IntervalsFilterCaster() *IntervalsFilter { + return s +} diff --git a/typedapi/types/intervalsfuzzy.go b/typedapi/types/intervalsfuzzy.go index 627f40f301..014a3095c6 100644 --- a/typedapi/types/intervalsfuzzy.go +++ b/typedapi/types/intervalsfuzzy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IntervalsFuzzy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L154-L184 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L154-L184 type IntervalsFuzzy struct { // Analyzer Analyzer used to normalize the term. Analyzer *string `json:"analyzer,omitempty"` @@ -141,3 +141,13 @@ func NewIntervalsFuzzy() *IntervalsFuzzy { return r } + +// true + +type IntervalsFuzzyVariant interface { + IntervalsFuzzyCaster() *IntervalsFuzzy +} + +func (s *IntervalsFuzzy) IntervalsFuzzyCaster() *IntervalsFuzzy { + return s +} diff --git a/typedapi/types/intervalsmatch.go b/typedapi/types/intervalsmatch.go index a30a57e3b0..db58df440d 100644 --- a/typedapi/types/intervalsmatch.go +++ b/typedapi/types/intervalsmatch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IntervalsMatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L186-L216 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L186-L216 type IntervalsMatch struct { // Analyzer Analyzer used to analyze terms in the query. 
Analyzer *string `json:"analyzer,omitempty"` @@ -141,3 +141,13 @@ func NewIntervalsMatch() *IntervalsMatch { return r } + +// true + +type IntervalsMatchVariant interface { + IntervalsMatchCaster() *IntervalsMatch +} + +func (s *IntervalsMatch) IntervalsMatchCaster() *IntervalsMatch { + return s +} diff --git a/typedapi/types/intervalsprefix.go b/typedapi/types/intervalsprefix.go index 0ea1c68f2e..13e5cf8582 100644 --- a/typedapi/types/intervalsprefix.go +++ b/typedapi/types/intervalsprefix.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IntervalsPrefix type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L218-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L218-L233 type IntervalsPrefix struct { // Analyzer Analyzer used to analyze the `prefix`. Analyzer *string `json:"analyzer,omitempty"` @@ -99,3 +99,13 @@ func NewIntervalsPrefix() *IntervalsPrefix { return r } + +// true + +type IntervalsPrefixVariant interface { + IntervalsPrefixCaster() *IntervalsPrefix +} + +func (s *IntervalsPrefix) IntervalsPrefixCaster() *IntervalsPrefix { + return s +} diff --git a/typedapi/types/intervalsquery.go b/typedapi/types/intervalsquery.go index 60ac19b336..e19c0065c8 100644 --- a/typedapi/types/intervalsquery.go +++ b/typedapi/types/intervalsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,9 @@ import ( // IntervalsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L235-L266 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L235-L266 type IntervalsQuery struct { + AdditionalIntervalsQueryProperty map[string]json.RawMessage `json:"-"` // AllOf Returns matches that span a combination of other rules. AllOf *IntervalsAllOf `json:"all_of,omitempty"` // AnyOf Returns intervals produced by any of its sub-rules. @@ -128,14 +129,68 @@ func (s *IntervalsQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalIntervalsQueryProperty == nil { + s.AdditionalIntervalsQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalIntervalsQueryProperty", err) + } + s.AdditionalIntervalsQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IntervalsQuery) MarshalJSON() ([]byte, error) { + type opt IntervalsQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + 
delete(tmp, "AdditionalIntervalsQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervalsQuery returns a IntervalsQuery. func NewIntervalsQuery() *IntervalsQuery { - r := &IntervalsQuery{} + r := &IntervalsQuery{ + AdditionalIntervalsQueryProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsQueryVariant interface { + IntervalsQueryCaster() *IntervalsQuery +} + +func (s *IntervalsQuery) IntervalsQueryCaster() *IntervalsQuery { + return s +} diff --git a/typedapi/types/intervalswildcard.go b/typedapi/types/intervalswildcard.go index 8677e19bd7..0208ec546d 100644 --- a/typedapi/types/intervalswildcard.go +++ b/typedapi/types/intervalswildcard.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IntervalsWildcard type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L268-L283 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L268-L283 type IntervalsWildcard struct { // Analyzer Analyzer used to analyze the `pattern`. // Defaults to the top-level field's analyzer. 
@@ -100,3 +100,13 @@ func NewIntervalsWildcard() *IntervalsWildcard { return r } + +// true + +type IntervalsWildcardVariant interface { + IntervalsWildcardCaster() *IntervalsWildcard +} + +func (s *IntervalsWildcard) IntervalsWildcardCaster() *IntervalsWildcard { + return s +} diff --git a/typedapi/types/invertedindex.go b/typedapi/types/invertedindex.go index 7130d37510..1c264aa188 100644 --- a/typedapi/types/invertedindex.go +++ b/typedapi/types/invertedindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // InvertedIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76 type InvertedIndex struct { Offsets uint `json:"offsets"` Payloads uint `json:"payloads"` @@ -39,3 +39,5 @@ func NewInvertedIndex() *InvertedIndex { return r } + +// false diff --git a/typedapi/types/invocation.go b/typedapi/types/invocation.go index e2bc3db033..fbbe839af5 100644 --- a/typedapi/types/invocation.go +++ b/typedapi/types/invocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Invocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L138-L141 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L148-L151 type Invocation struct { SnapshotName string `json:"snapshot_name"` Time DateTime `json:"time"` @@ -72,3 +72,5 @@ func NewInvocation() *Invocation { return r } + +// false diff --git a/typedapi/types/invocations.go b/typedapi/types/invocations.go index 8e013929e7..c6f50d706d 100644 --- a/typedapi/types/invocations.go +++ b/typedapi/types/invocations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Invocations type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L41-L43 type Invocations struct { Total int64 `json:"total"` } @@ -77,3 +77,5 @@ func NewInvocations() *Invocations { return r } + +// false diff --git a/typedapi/types/iostatdevice.go b/typedapi/types/iostatdevice.go index 21edf8c97c..562a8c7da4 100644 --- a/typedapi/types/iostatdevice.go +++ b/typedapi/types/iostatdevice.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IoStatDevice type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L801-L826 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L801-L826 type IoStatDevice struct { // DeviceName The Linux device name. DeviceName *string `json:"device_name,omitempty"` @@ -165,3 +165,5 @@ func NewIoStatDevice() *IoStatDevice { return r } + +// false diff --git a/typedapi/types/iostats.go b/typedapi/types/iostats.go index b34125dc2b..5bad37f72f 100644 --- a/typedapi/types/iostats.go +++ b/typedapi/types/iostats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // IoStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L789-L799 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L789-L799 type IoStats struct { // Devices Array of disk metrics for each device that is backing an Elasticsearch data // path. @@ -40,3 +40,5 @@ func NewIoStats() *IoStats { return r } + +// false diff --git a/typedapi/types/ipfilter.go b/typedapi/types/ipfilter.go index 5b5b9a1809..b53ddd9ef3 100644 --- a/typedapi/types/ipfilter.go +++ b/typedapi/types/ipfilter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IpFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L165-L168 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L177-L180 type IpFilter struct { Http bool `json:"http"` Transport bool `json:"transport"` @@ -91,3 +91,5 @@ func NewIpFilter() *IpFilter { return r } + +// false diff --git a/typedapi/types/ipinfo.go b/typedapi/types/ipinfo.go new file mode 100644 index 0000000000..26c0769087 --- /dev/null +++ b/typedapi/types/ipinfo.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// Ipinfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L59-L59 +type Ipinfo struct { +} + +// NewIpinfo returns a Ipinfo. +func NewIpinfo() *Ipinfo { + r := &Ipinfo{} + + return r +} + +// true + +type IpinfoVariant interface { + IpinfoCaster() *Ipinfo +} + +func (s *Ipinfo) IpinfoCaster() *Ipinfo { + return s +} diff --git a/typedapi/types/iplocationdatabaseconfigurationmetadata.go b/typedapi/types/iplocationdatabaseconfigurationmetadata.go new file mode 100644 index 0000000000..aa8c0f66d2 --- /dev/null +++ b/typedapi/types/iplocationdatabaseconfigurationmetadata.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IpLocationDatabaseConfigurationMetadata type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L28-L34 +type IpLocationDatabaseConfigurationMetadata struct { + Database DatabaseConfigurationFull `json:"database"` + Id string `json:"id"` + ModifiedDate *int64 `json:"modified_date,omitempty"` + ModifiedDateMillis *int64 `json:"modified_date_millis,omitempty"` + Version int64 `json:"version"` +} + +func (s *IpLocationDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database": + if err := dec.Decode(&s.Database); err != nil { + return fmt.Errorf("%s | %w", "Database", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDate", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIpLocationDatabaseConfigurationMetadata returns a IpLocationDatabaseConfigurationMetadata. +func NewIpLocationDatabaseConfigurationMetadata() *IpLocationDatabaseConfigurationMetadata { + r := &IpLocationDatabaseConfigurationMetadata{} + + return r +} + +// false diff --git a/typedapi/types/iplocationprocessor.go b/typedapi/types/iplocationprocessor.go new file mode 100644 index 0000000000..e34e0c34cb --- /dev/null +++ b/typedapi/types/iplocationprocessor.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IpLocationProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L479-L513 +type IpLocationProcessor struct { + // DatabaseFile The database filename referring to a database the module ships with + // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom + // database in the ingest-geoip config directory. + DatabaseFile *string `json:"database_file,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DownloadDatabaseOnPipelineCreation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the + // missing database is downloaded when the pipeline is created. + // Else, the download is triggered by when the pipeline is used as the + // `default_pipeline` or `final_pipeline` in an index. 
+ DownloadDatabaseOnPipelineCreation *bool `json:"download_database_on_pipeline_creation,omitempty"` + // Field The field to get the ip address from for the geographical lookup. + Field string `json:"field"` + // FirstOnly If `true`, only the first found IP location data will be returned, even if + // the field contains an array. + FirstOnly *bool `json:"first_only,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to the `target_field` based on the IP + // location lookup. + Properties []string `json:"properties,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the geographical information looked up from the + // MaxMind database. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *IpLocationProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatabaseFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatabaseFile = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "download_database_on_pipeline_creation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DownloadDatabaseOnPipelineCreation", err) + } + s.DownloadDatabaseOnPipelineCreation = &value + case bool: + s.DownloadDatabaseOnPipelineCreation = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "first_only": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FirstOnly", err) + } + s.FirstOnly = &value + case bool: + s.FirstOnly = &v + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var 
tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewIpLocationProcessor returns a IpLocationProcessor. +func NewIpLocationProcessor() *IpLocationProcessor { + r := &IpLocationProcessor{} + + return r +} + +// true + +type IpLocationProcessorVariant interface { + IpLocationProcessorCaster() *IpLocationProcessor +} + +func (s *IpLocationProcessor) IpLocationProcessorCaster() *IpLocationProcessor { + return s +} diff --git a/typedapi/types/ipprefixaggregate.go b/typedapi/types/ipprefixaggregate.go index e26719364c..046bba7acc 100644 --- a/typedapi/types/ipprefixaggregate.go +++ b/typedapi/types/ipprefixaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IpPrefixAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L709-L713 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L709-L713 type IpPrefixAggregate struct { Buckets BucketsIpPrefixBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewIpPrefixAggregate() *IpPrefixAggregate { return r } + +// false diff --git a/typedapi/types/ipprefixaggregation.go b/typedapi/types/ipprefixaggregation.go index e289cc276a..039cf4c1a7 100644 --- a/typedapi/types/ipprefixaggregation.go +++ b/typedapi/types/ipprefixaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IpPrefixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1195-L1224 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1195-L1224 type IpPrefixAggregation struct { // AppendPrefixLength Defines whether the prefix length is appended to IP address keys in the // response. 
@@ -156,3 +156,13 @@ func NewIpPrefixAggregation() *IpPrefixAggregation { return r } + +// true + +type IpPrefixAggregationVariant interface { + IpPrefixAggregationCaster() *IpPrefixAggregation +} + +func (s *IpPrefixAggregation) IpPrefixAggregationCaster() *IpPrefixAggregation { + return s +} diff --git a/typedapi/types/ipprefixbucket.go b/typedapi/types/ipprefixbucket.go index 8e44175036..3b722f847e 100644 --- a/typedapi/types/ipprefixbucket.go +++ b/typedapi/types/ipprefixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // IpPrefixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L715-L720 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L715-L720 type IpPrefixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -683,8 +683,10 @@ func (s IpPrefixBucket) MarshalJSON() ([]byte, error) { // NewIpPrefixBucket returns a IpPrefixBucket. func NewIpPrefixBucket() *IpPrefixBucket { r := &IpPrefixBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/ipproperty.go b/typedapi/types/ipproperty.go index dd3a80b97c..1228c32a8a 100644 --- a/typedapi/types/ipproperty.go +++ b/typedapi/types/ipproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IpProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L65-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L74-L88 type IpProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,12 +46,13 @@ type IpProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -142,301 +144,313 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo 
:= NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo 
:= NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -530,301 +544,313 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := 
NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -849,6 +875,11 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -891,6 +922,7 @@ func (s IpProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, Type: s.Type, } @@ -903,10 +935,20 @@ func (s IpProperty) MarshalJSON() ([]byte, error) { // NewIpProperty returns a IpProperty. func NewIpProperty() *IpProperty { r := &IpProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IpPropertyVariant interface { + IpPropertyCaster() *IpProperty +} + +func (s *IpProperty) IpPropertyCaster() *IpProperty { + return s +} diff --git a/typedapi/types/iprangeaggregate.go b/typedapi/types/iprangeaggregate.go index 2ab850f255..183c92b064 100644 --- a/typedapi/types/iprangeaggregate.go +++ b/typedapi/types/iprangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IpRangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L624-L629 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L624-L629 type IpRangeAggregate struct { Buckets BucketsIpRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewIpRangeAggregate() *IpRangeAggregate { return r } + +// false diff --git a/typedapi/types/iprangeaggregation.go b/typedapi/types/iprangeaggregation.go index 3e417c03ff..23976b3bc4 100644 --- a/typedapi/types/iprangeaggregation.go +++ b/typedapi/types/iprangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // IpRangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L567-L576 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L567-L576 type IpRangeAggregation struct { // Field The date field whose values are used to build ranges. 
Field *string `json:"field,omitempty"` @@ -74,3 +74,13 @@ func NewIpRangeAggregation() *IpRangeAggregation { return r } + +// true + +type IpRangeAggregationVariant interface { + IpRangeAggregationCaster() *IpRangeAggregation +} + +func (s *IpRangeAggregation) IpRangeAggregationCaster() *IpRangeAggregation { + return s +} diff --git a/typedapi/types/iprangeaggregationrange.go b/typedapi/types/iprangeaggregationrange.go index a481cdece4..f3d28cbe6b 100644 --- a/typedapi/types/iprangeaggregationrange.go +++ b/typedapi/types/iprangeaggregationrange.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IpRangeAggregationRange type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L578-L591 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L578-L591 type IpRangeAggregationRange struct { // From Start of the range. From *string `json:"from,omitempty"` @@ -103,3 +103,13 @@ func NewIpRangeAggregationRange() *IpRangeAggregationRange { return r } + +// true + +type IpRangeAggregationRangeVariant interface { + IpRangeAggregationRangeCaster() *IpRangeAggregationRange +} + +func (s *IpRangeAggregationRange) IpRangeAggregationRangeCaster() *IpRangeAggregationRange { + return s +} diff --git a/typedapi/types/iprangebucket.go b/typedapi/types/iprangebucket.go index 4ddcb656b6..fcbd10a089 100644 --- a/typedapi/types/iprangebucket.go +++ b/typedapi/types/iprangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // IpRangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L631-L635 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L631-L635 type IpRangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -664,8 +664,10 @@ func (s IpRangeBucket) MarshalJSON() ([]byte, error) { // NewIpRangeBucket returns a IpRangeBucket. func NewIpRangeBucket() *IpRangeBucket { r := &IpRangeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/iprangeproperty.go b/typedapi/types/iprangeproperty.go index fe8f859809..73b001853c 100644 --- a/typedapi/types/iprangeproperty.go +++ b/typedapi/types/iprangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IpRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L46-L48 type IpRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,10 +45,11 @@ type IpRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { @@ -149,301 +151,313 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -506,301 +520,313 @@ func (s *IpRangeProperty) UnmarshalJSON(data 
[]byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := 
NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -820,6 +846,11 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -834,18 +865,19 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { func (s IpRangeProperty) MarshalJSON() ([]byte, error) { type innerIpRangeProperty IpRangeProperty tmp := innerIpRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + 
CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "ip_range" @@ -856,10 +888,20 @@ func (s IpRangeProperty) MarshalJSON() ([]byte, error) { // NewIpRangeProperty returns a IpRangeProperty. func NewIpRangeProperty() *IpRangeProperty { r := &IpRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IpRangePropertyVariant interface { + IpRangePropertyCaster() *IpRangeProperty +} + +func (s *IpRangeProperty) IpRangePropertyCaster() *IpRangeProperty { + return s +} diff --git a/typedapi/types/irishanalyzer.go b/typedapi/types/irishanalyzer.go index 883b1cbf3c..5e7b0994c8 100644 --- a/typedapi/types/irishanalyzer.go +++ b/typedapi/types/irishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // IrishAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L209-L214 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L220-L225 type IrishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewIrishAnalyzer() *IrishAnalyzer { return r } + +// true + +type IrishAnalyzerVariant interface { + IrishAnalyzerCaster() *IrishAnalyzer +} + +func (s *IrishAnalyzer) IrishAnalyzerCaster() *IrishAnalyzer { + return s +} diff --git a/typedapi/types/italiananalyzer.go b/typedapi/types/italiananalyzer.go index 5dde378622..85e61b62e8 100644 --- a/typedapi/types/italiananalyzer.go +++ b/typedapi/types/italiananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ItalianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L216-L221 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L227-L232 type ItalianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewItalianAnalyzer() *ItalianAnalyzer { return r } + +// true + +type ItalianAnalyzerVariant interface { + ItalianAnalyzerCaster() *ItalianAnalyzer +} + +func (s *ItalianAnalyzer) ItalianAnalyzerCaster() *ItalianAnalyzer { + return s +} diff --git a/typedapi/types/job.go b/typedapi/types/job.go index 14abae9ad1..6d48d8c816 100644 --- a/typedapi/types/job.go +++ b/typedapi/types/job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Job type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L61-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L61-L180 type Job struct { // AllowLazyOpen Advanced configuration option. // Specifies whether this job can open when there is insufficient machine @@ -351,3 +351,5 @@ func NewJob() *Job { return r } + +// false diff --git a/typedapi/types/jobblocked.go b/typedapi/types/jobblocked.go index 9989be096b..94dd0ac08b 100644 --- a/typedapi/types/jobblocked.go +++ b/typedapi/types/jobblocked.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // JobBlocked type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L392-L395 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L392-L395 type JobBlocked struct { Reason jobblockedreason.JobBlockedReason `json:"reason"` TaskId TaskId `json:"task_id,omitempty"` @@ -74,3 +74,5 @@ func NewJobBlocked() *JobBlocked { return r } + +// false diff --git a/typedapi/types/jobconfig.go b/typedapi/types/jobconfig.go index 2a56ef3195..eb802c5922 100644 --- a/typedapi/types/jobconfig.go +++ b/typedapi/types/jobconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JobConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L182-L283 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L182-L283 type JobConfig struct { // AllowLazyOpen Advanced configuration option. 
Specifies whether this job can open when there // is insufficient machine learning node capacity for it to be immediately @@ -293,3 +293,13 @@ func NewJobConfig() *JobConfig { return r } + +// true + +type JobConfigVariant interface { + JobConfigCaster() *JobConfig +} + +func (s *JobConfig) JobConfigCaster() *JobConfig { + return s +} diff --git a/typedapi/types/jobforecaststatistics.go b/typedapi/types/jobforecaststatistics.go index a430c4775f..22012fb264 100644 --- a/typedapi/types/jobforecaststatistics.go +++ b/typedapi/types/jobforecaststatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JobForecastStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L343-L350 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L343-L350 type JobForecastStatistics struct { ForecastedJobs int `json:"forecasted_jobs"` MemoryBytes *JobStatistics `json:"memory_bytes,omitempty"` @@ -118,8 +118,10 @@ func (s *JobForecastStatistics) UnmarshalJSON(data []byte) error { // NewJobForecastStatistics returns a JobForecastStatistics. func NewJobForecastStatistics() *JobForecastStatistics { r := &JobForecastStatistics{ - Status: make(map[string]int64, 0), + Status: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/jobsrecord.go b/typedapi/types/jobsrecord.go index 7cf5538774..a2799aeac5 100644 --- a/typedapi/types/jobsrecord.go +++ b/typedapi/types/jobsrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // JobsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_jobs/types.ts#L24-L347 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_jobs/types.ts#L24-L347 type JobsRecord struct { // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the // selection of a node to run the job. @@ -909,3 +909,5 @@ func NewJobsRecord() *JobsRecord { return r } + +// false diff --git a/typedapi/types/jobstatistics.go b/typedapi/types/jobstatistics.go index 9ae63aff16..da5c933c73 100644 --- a/typedapi/types/jobstatistics.go +++ b/typedapi/types/jobstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JobStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L54-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L54-L59 type JobStatistics struct { Avg Float64 `json:"avg"` Max Float64 `json:"max"` @@ -129,3 +129,5 @@ func NewJobStatistics() *JobStatistics { return r } + +// false diff --git a/typedapi/types/jobstats.go b/typedapi/types/jobstats.go index 94e122b2c6..80e09aa590 100644 --- a/typedapi/types/jobstats.go +++ b/typedapi/types/jobstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // JobStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L284-L330 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L284-L330 type JobStats struct { // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the // selection of a node to run the job. @@ -57,7 +57,7 @@ type JobStats struct { ModelSizeStats ModelSizeStats `json:"model_size_stats"` // Node Contains properties for the node that runs the job. // This information is available only for open jobs. - Node *DiscoveryNode `json:"node,omitempty"` + Node *DiscoveryNodeCompact `json:"node,omitempty"` // OpenTime For open jobs only, the elapsed time for which the job has been open. 
OpenTime DateTime `json:"open_time,omitempty"` // State The status of the anomaly detection job, which can be one of the following @@ -167,3 +167,5 @@ func NewJobStats() *JobStats { return r } + +// false diff --git a/typedapi/types/jobtimingstats.go b/typedapi/types/jobtimingstats.go index b78f8f6687..7843779444 100644 --- a/typedapi/types/jobtimingstats.go +++ b/typedapi/types/jobtimingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JobTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Job.ts#L332-L341 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Job.ts#L332-L341 type JobTimingStats struct { AverageBucketProcessingTimeMs Float64 `json:"average_bucket_processing_time_ms,omitempty"` BucketCount int64 `json:"bucket_count"` @@ -119,3 +119,5 @@ func NewJobTimingStats() *JobTimingStats { return r } + +// false diff --git a/typedapi/types/jobusage.go b/typedapi/types/jobusage.go index 2bfe0271d4..39fc2a4380 100644 --- a/typedapi/types/jobusage.go +++ b/typedapi/types/jobusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JobUsage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L362-L368 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L370-L376 type JobUsage struct { Count int `json:"count"` CreatedBy map[string]int64 `json:"created_by"` @@ -102,8 +102,10 @@ func (s *JobUsage) UnmarshalJSON(data []byte) error { // NewJobUsage returns a JobUsage. func NewJobUsage() *JobUsage { r := &JobUsage{ - CreatedBy: make(map[string]int64, 0), + CreatedBy: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/joinprocessor.go b/typedapi/types/joinprocessor.go index 5c604b305b..ad7e72d9c1 100644 --- a/typedapi/types/joinprocessor.go +++ b/typedapi/types/joinprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JoinProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1063-L1078 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1104-L1119 type JoinProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type JoinProcessor struct { // Field Field containing array values to join. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -87,16 +87,9 @@ func (s *JoinProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -157,3 +150,13 @@ func NewJoinProcessor() *JoinProcessor { return r } + +// true + +type JoinProcessorVariant interface { + JoinProcessorCaster() *JoinProcessor +} + +func (s *JoinProcessor) JoinProcessorCaster() *JoinProcessor { + return s +} diff --git a/typedapi/types/joinproperty.go b/typedapi/types/joinproperty.go index abd0fb088c..7d14b0b387 100644 --- a/typedapi/types/joinproperty.go +++ b/typedapi/types/joinproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,21 +29,23 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // JoinProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L92-L96 type JoinProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Relations map[string][]string `json:"relations,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Relations map[string][]string `json:"relations,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *JoinProperty) UnmarshalJSON(data []byte) error { @@ -99,301 +101,313 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := 
NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -442,301 +456,313 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -767,6 +793,11 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -788,6 +819,7 @@ func (s JoinProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, Properties: s.Properties, Relations: s.Relations, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -799,11 +831,21 @@ func (s JoinProperty) MarshalJSON() ([]byte, error) { // NewJoinProperty returns a JoinProperty. 
func NewJoinProperty() *JoinProperty { r := &JoinProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), - Relations: make(map[string][]string, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + Relations: make(map[string][]string), } return r } + +// true + +type JoinPropertyVariant interface { + JoinPropertyCaster() *JoinProperty +} + +func (s *JoinProperty) JoinPropertyCaster() *JoinProperty { + return s +} diff --git a/typedapi/types/jsonprocessor.go b/typedapi/types/jsonprocessor.go index b03eb4a442..5590819a7f 100644 --- a/typedapi/types/jsonprocessor.go +++ b/typedapi/types/jsonprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // JsonProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1080-L1109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1121-L1150 type JsonProcessor struct { // AddToRoot Flag that forces the parsed JSON to be added at the top level of the // document. @@ -54,7 +54,7 @@ type JsonProcessor struct { // Field The field to be parsed. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. 
@@ -133,16 +133,9 @@ func (s *JsonProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -191,3 +184,13 @@ func NewJsonProcessor() *JsonProcessor { return r } + +// true + +type JsonProcessorVariant interface { + JsonProcessorCaster() *JsonProcessor +} + +func (s *JsonProcessor) JsonProcessorCaster() *JsonProcessor { + return s +} diff --git a/typedapi/types/jvm.go b/typedapi/types/jvm.go index 2341185205..a8b30a75cc 100644 --- a/typedapi/types/jvm.go +++ b/typedapi/types/jvm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Jvm type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L882-L916 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L882-L916 type Jvm struct { // BufferPools Contains statistics about JVM buffer pools for the node. BufferPools map[string]NodeBufferPool `json:"buffer_pools,omitempty"` @@ -145,8 +145,10 @@ func (s *Jvm) UnmarshalJSON(data []byte) error { // NewJvm returns a Jvm. 
func NewJvm() *Jvm { r := &Jvm{ - BufferPools: make(map[string]NodeBufferPool, 0), + BufferPools: make(map[string]NodeBufferPool), } return r } + +// false diff --git a/typedapi/types/jvmclasses.go b/typedapi/types/jvmclasses.go index e898eb3746..cf43512ac0 100644 --- a/typedapi/types/jvmclasses.go +++ b/typedapi/types/jvmclasses.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JvmClasses type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L979-L992 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L979-L992 type JvmClasses struct { // CurrentLoadedCount Number of classes currently loaded by JVM. CurrentLoadedCount *int64 `json:"current_loaded_count,omitempty"` @@ -112,3 +112,5 @@ func NewJvmClasses() *JvmClasses { return r } + +// false diff --git a/typedapi/types/jvmmemorystats.go b/typedapi/types/jvmmemorystats.go index 5cc88c2364..ad6d205600 100644 --- a/typedapi/types/jvmmemorystats.go +++ b/typedapi/types/jvmmemorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JvmMemoryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L918-L947 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L918-L947 type JvmMemoryStats struct { // HeapCommittedInBytes Amount of memory, in bytes, available for use by the heap. HeapCommittedInBytes *int64 `json:"heap_committed_in_bytes,omitempty"` @@ -170,8 +170,10 @@ func (s *JvmMemoryStats) UnmarshalJSON(data []byte) error { // NewJvmMemoryStats returns a JvmMemoryStats. func NewJvmMemoryStats() *JvmMemoryStats { r := &JvmMemoryStats{ - Pools: make(map[string]Pool, 0), + Pools: make(map[string]Pool), } return r } + +// false diff --git a/typedapi/types/jvmstats.go b/typedapi/types/jvmstats.go index ad169b9373..2e9698b9c1 100644 --- a/typedapi/types/jvmstats.go +++ b/typedapi/types/jvmstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JvmStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_memory_stats/types.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_memory_stats/types.ts#L50-L63 type JvmStats struct { // HeapMax Maximum amount of memory available for use by the heap. 
HeapMax ByteSize `json:"heap_max,omitempty"` @@ -138,3 +138,5 @@ func NewJvmStats() *JvmStats { return r } + +// false diff --git a/typedapi/types/jvmthreads.go b/typedapi/types/jvmthreads.go index 4214f7201f..c604fc2ea0 100644 --- a/typedapi/types/jvmthreads.go +++ b/typedapi/types/jvmthreads.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // JvmThreads type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L968-L977 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L968-L977 type JvmThreads struct { // Count Number of active threads in use by JVM. Count *int64 `json:"count,omitempty"` @@ -95,3 +95,5 @@ func NewJvmThreads() *JvmThreads { return r } + +// false diff --git a/typedapi/types/keeptypestokenfilter.go b/typedapi/types/keeptypestokenfilter.go index 81f98eb3d5..bfc4578051 100644 --- a/typedapi/types/keeptypestokenfilter.go +++ b/typedapi/types/keeptypestokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // KeepTypesTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L219-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L219-L223 type KeepTypesTokenFilter struct { Mode *keeptypesmode.KeepTypesMode `json:"mode,omitempty"` Type string `json:"type,omitempty"` @@ -101,3 +101,13 @@ func NewKeepTypesTokenFilter() *KeepTypesTokenFilter { return r } + +// true + +type KeepTypesTokenFilterVariant interface { + KeepTypesTokenFilterCaster() *KeepTypesTokenFilter +} + +func (s *KeepTypesTokenFilter) KeepTypesTokenFilterCaster() *KeepTypesTokenFilter { + return s +} diff --git a/typedapi/types/keepwordstokenfilter.go b/typedapi/types/keepwordstokenfilter.go index 584534def6..43aaa994c2 100644 --- a/typedapi/types/keepwordstokenfilter.go +++ b/typedapi/types/keepwordstokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KeepWordsTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L225-L230 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L225-L230 type KeepWordsTokenFilter struct { KeepWords []string `json:"keep_words,omitempty"` KeepWordsCase *bool `json:"keep_words_case,omitempty"` @@ -123,3 +123,13 @@ func NewKeepWordsTokenFilter() *KeepWordsTokenFilter { return r } + +// true + +type KeepWordsTokenFilterVariant interface { + KeepWordsTokenFilterCaster() *KeepWordsTokenFilter +} + +func (s *KeepWordsTokenFilter) KeepWordsTokenFilterCaster() *KeepWordsTokenFilter { + return s +} diff --git a/typedapi/types/keyedpercentiles.go b/typedapi/types/keyedpercentiles.go index 2eb2b717bd..1856180915 100644 --- a/typedapi/types/keyedpercentiles.go +++ b/typedapi/types/keyedpercentiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // KeyedPercentiles type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L160-L160 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L160-L160 type KeyedPercentiles map[string]string func (s KeyedPercentiles) UnmarshalJSON(data []byte) error { diff --git a/typedapi/types/keyedprocessor.go b/typedapi/types/keyedprocessor.go index 068d2b7044..bd3fb65891 100644 --- a/typedapi/types/keyedprocessor.go +++ b/typedapi/types/keyedprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KeyedProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L415-L418 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L415-L418 type KeyedProcessor struct { Stats *Processor `json:"stats,omitempty"` Type *string `json:"type,omitempty"` @@ -80,3 +80,5 @@ func NewKeyedProcessor() *KeyedProcessor { return r } + +// false diff --git a/typedapi/types/keyvalueprocessor.go b/typedapi/types/keyvalueprocessor.go index a80716c62c..dfbaa1faf0 100644 --- a/typedapi/types/keyvalueprocessor.go +++ b/typedapi/types/keyvalueprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KeyValueProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1118-L1170 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1159-L1211 type KeyValueProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -44,7 +44,7 @@ type KeyValueProcessor struct { // FieldSplit Regex pattern to use for splitting key-value pairs. FieldSplit string `json:"field_split"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -126,16 +126,9 @@ func (s *KeyValueProcessor) UnmarshalJSON(data []byte) error { s.FieldSplit = o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -265,3 +258,13 @@ func NewKeyValueProcessor() *KeyValueProcessor { return r } + +// true + +type KeyValueProcessorVariant interface { + KeyValueProcessorCaster() *KeyValueProcessor +} + +func (s *KeyValueProcessor) KeyValueProcessorCaster() *KeyValueProcessor { + return s +} diff --git a/typedapi/types/keywordanalyzer.go b/typedapi/types/keywordanalyzer.go index 83ef2b9bde..c5958ff0a2 100644 --- a/typedapi/types/keywordanalyzer.go +++ b/typedapi/types/keywordanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // KeywordAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L66-L70 type KeywordAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewKeywordAnalyzer() *KeywordAnalyzer { return r } + +// true + +type KeywordAnalyzerVariant interface { + KeywordAnalyzerCaster() *KeywordAnalyzer +} + +func (s *KeywordAnalyzer) KeywordAnalyzerCaster() *KeywordAnalyzer { + return s +} diff --git a/typedapi/types/keywordmarkertokenfilter.go b/typedapi/types/keywordmarkertokenfilter.go index f0879ab1b5..0c64f54f65 100644 --- a/typedapi/types/keywordmarkertokenfilter.go +++ b/typedapi/types/keywordmarkertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KeywordMarkerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L232-L238 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L232-L238 type KeywordMarkerTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` Keywords []string `json:"keywords,omitempty"` @@ -148,3 +148,13 @@ func NewKeywordMarkerTokenFilter() *KeywordMarkerTokenFilter { return r } + +// true + +type KeywordMarkerTokenFilterVariant interface { + KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter +} + +func (s *KeywordMarkerTokenFilter) KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter { + return s +} diff --git a/typedapi/types/keywordproperty.go b/typedapi/types/keywordproperty.go index c7e833acf5..b610facb77 100644 --- a/typedapi/types/keywordproperty.go +++ b/typedapi/types/keywordproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // KeywordProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L94-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L98-L117 type KeywordProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -47,16 +48,17 @@ type KeywordProperty struct { Index *bool `json:"index,omitempty"` IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Normalizer *string `json:"normalizer,omitempty"` - Norms *bool `json:"norms,omitempty"` - NullValue *string `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Normalizer *string `json:"normalizer,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +164,313 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -567,301 +581,313 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -912,6 +938,11 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -959,6 +990,7 @@ func (s KeywordProperty) MarshalJSON() ([]byte, error) { Similarity: s.Similarity, SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, Type: s.Type, } @@ -971,10 +1003,20 @@ func (s KeywordProperty) MarshalJSON() ([]byte, error) { // NewKeywordProperty returns a KeywordProperty. func NewKeywordProperty() *KeywordProperty { r := &KeywordProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type KeywordPropertyVariant interface { + KeywordPropertyCaster() *KeywordProperty +} + +func (s *KeywordProperty) KeywordPropertyCaster() *KeywordProperty { + return s +} diff --git a/typedapi/types/keywordtokenizer.go b/typedapi/types/keywordtokenizer.go index e8ff0e979f..0420dfd735 100644 --- a/typedapi/types/keywordtokenizer.go +++ b/typedapi/types/keywordtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KeywordTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L68-L74 type KeywordTokenizer struct { BufferSize *int `json:"buffer_size,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewKeywordTokenizer() *KeywordTokenizer { return r } + +// true + +type KeywordTokenizerVariant interface { + KeywordTokenizerCaster() *KeywordTokenizer +} + +func (s *KeywordTokenizer) KeywordTokenizerCaster() *KeywordTokenizer { + return s +} diff --git a/typedapi/types/kibanatoken.go b/typedapi/types/kibanatoken.go index 56c9e58182..45ed44010a 100644 --- a/typedapi/types/kibanatoken.go +++ b/typedapi/types/kibanatoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,9 +31,12 @@ import ( // KibanaToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/enroll_kibana/Response.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/enroll_kibana/Response.ts#L31-L41 type KibanaToken struct { - Name string `json:"name"` + // Name The name of the bearer token for the `elastic/kibana` service account. + Name string `json:"name"` + // Value The value of the bearer token for the `elastic/kibana` service account. + // Use this value to authenticate the service account with Elasticsearch. 
Value string `json:"value"` } @@ -87,3 +90,5 @@ func NewKibanaToken() *KibanaToken { return r } + +// false diff --git a/typedapi/types/knncollectorresult.go b/typedapi/types/knncollectorresult.go index 097936aee6..a3a93d95b2 100644 --- a/typedapi/types/knncollectorresult.go +++ b/typedapi/types/knncollectorresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnCollectorResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L222-L228 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L222-L228 type KnnCollectorResult struct { Children []KnnCollectorResult `json:"children,omitempty"` Name string `json:"name"` @@ -105,3 +105,5 @@ func NewKnnCollectorResult() *KnnCollectorResult { return r } + +// false diff --git a/typedapi/types/knnquery.go b/typedapi/types/knnquery.go index 3de30727de..de35527e1e 100644 --- a/typedapi/types/knnquery.go +++ b/typedapi/types/knnquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Knn.ts#L54-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Knn.ts#L64-L87 type KnnQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -53,6 +53,8 @@ type KnnQuery struct { // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or // query_vector, but not both. QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity for a vector to be considered a match Similarity *float32 `json:"similarity,omitempty"` } @@ -163,6 +165,11 @@ func (s *KnnQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -190,3 +197,13 @@ func NewKnnQuery() *KnnQuery { return r } + +// true + +type KnnQueryVariant interface { + KnnQueryCaster() *KnnQuery +} + +func (s *KnnQuery) KnnQueryCaster() *KnnQuery { + return s +} diff --git a/typedapi/types/knnqueryprofilebreakdown.go b/typedapi/types/knnqueryprofilebreakdown.go index 58877d3cc0..e7b9961739 100644 --- a/typedapi/types/knnqueryprofilebreakdown.go +++ b/typedapi/types/knnqueryprofilebreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnQueryProfileBreakdown type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L199-L220 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L199-L220 type KnnQueryProfileBreakdown struct { Advance int64 `json:"advance"` AdvanceCount int64 `json:"advance_count"` @@ -381,3 +381,5 @@ func NewKnnQueryProfileBreakdown() *KnnQueryProfileBreakdown { return r } + +// false diff --git a/typedapi/types/knnqueryprofileresult.go b/typedapi/types/knnqueryprofileresult.go index 1736c2a21f..d4294b0f02 100644 --- a/typedapi/types/knnqueryprofileresult.go +++ b/typedapi/types/knnqueryprofileresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnQueryProfileResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L188-L196 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L188-L196 type KnnQueryProfileResult struct { Breakdown KnnQueryProfileBreakdown `json:"breakdown"` Children []KnnQueryProfileResult `json:"children,omitempty"` @@ -117,8 +117,10 @@ func (s *KnnQueryProfileResult) UnmarshalJSON(data []byte) error { // NewKnnQueryProfileResult returns a KnnQueryProfileResult. func NewKnnQueryProfileResult() *KnnQueryProfileResult { r := &KnnQueryProfileResult{ - Debug: make(map[string]json.RawMessage, 0), + Debug: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/knnretriever.go b/typedapi/types/knnretriever.go index daa6fba0c7..151b7272f8 100644 --- a/typedapi/types/knnretriever.go +++ b/typedapi/types/knnretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L64-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L64-L82 type KnnRetriever struct { // Field The name of the vector field to search against. Field string `json:"field"` @@ -50,6 +50,8 @@ type KnnRetriever struct { QueryVector []float32 `json:"query_vector,omitempty"` // QueryVectorBuilder Defines a model to build a query vector. 
QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity required for a document to be considered a match. Similarity *float32 `json:"similarity,omitempty"` } @@ -155,6 +157,11 @@ func (s *KnnRetriever) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -182,3 +189,13 @@ func NewKnnRetriever() *KnnRetriever { return r } + +// true + +type KnnRetrieverVariant interface { + KnnRetrieverCaster() *KnnRetriever +} + +func (s *KnnRetriever) KnnRetrieverCaster() *KnnRetriever { + return s +} diff --git a/typedapi/types/knnsearch.go b/typedapi/types/knnsearch.go index 6c44feaad4..cbab293e8b 100644 --- a/typedapi/types/knnsearch.go +++ b/typedapi/types/knnsearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KnnSearch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Knn.ts#L30-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Knn.ts#L35-L62 type KnnSearch struct { // Boost Boost value to apply to kNN scores Boost *float32 `json:"boost,omitempty"` @@ -50,6 +50,8 @@ type KnnSearch struct { // QueryVectorBuilder The query vector builder. 
You must provide a query_vector_builder or // query_vector, but not both. QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity for a vector to be considered a match Similarity *float32 `json:"similarity,omitempty"` } @@ -153,6 +155,11 @@ func (s *KnnSearch) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -180,3 +187,13 @@ func NewKnnSearch() *KnnSearch { return r } + +// true + +type KnnSearchVariant interface { + KnnSearchCaster() *KnnSearch +} + +func (s *KnnSearch) KnnSearchCaster() *KnnSearch { + return s +} diff --git a/typedapi/types/kstemtokenfilter.go b/typedapi/types/kstemtokenfilter.go index 33574994aa..eec6f8f80d 100644 --- a/typedapi/types/kstemtokenfilter.go +++ b/typedapi/types/kstemtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // KStemTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L240-L242 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L240-L242 type KStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewKStemTokenFilter() *KStemTokenFilter { return r } + +// true + +type KStemTokenFilterVariant interface { + KStemTokenFilterCaster() *KStemTokenFilter +} + +func (s *KStemTokenFilter) KStemTokenFilterCaster() *KStemTokenFilter { + return s +} diff --git a/typedapi/types/kuromojianalyzer.go b/typedapi/types/kuromojianalyzer.go index 8e6317b907..36530161ba 100644 --- a/typedapi/types/kuromojianalyzer.go +++ b/typedapi/types/kuromojianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // KuromojiAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 type KuromojiAnalyzer struct { Mode kuromojitokenizationmode.KuromojiTokenizationMode `json:"mode"` Type string `json:"type,omitempty"` @@ -102,3 +102,13 @@ func NewKuromojiAnalyzer() *KuromojiAnalyzer { return r } + +// true + +type KuromojiAnalyzerVariant interface { + KuromojiAnalyzerCaster() *KuromojiAnalyzer +} + +func (s *KuromojiAnalyzer) KuromojiAnalyzerCaster() *KuromojiAnalyzer { + return s +} diff --git a/typedapi/types/kuromojiiterationmarkcharfilter.go b/typedapi/types/kuromojiiterationmarkcharfilter.go index cc8ff92492..438addb485 100644 --- a/typedapi/types/kuromojiiterationmarkcharfilter.go +++ b/typedapi/types/kuromojiiterationmarkcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KuromojiIterationMarkCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 type KuromojiIterationMarkCharFilter struct { NormalizeKana bool `json:"normalize_kana"` NormalizeKanji bool `json:"normalize_kanji"` @@ -118,3 +118,13 @@ func NewKuromojiIterationMarkCharFilter() *KuromojiIterationMarkCharFilter { return r } + +// true + +type KuromojiIterationMarkCharFilterVariant interface { + KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter +} + +func (s *KuromojiIterationMarkCharFilter) KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter { + return s +} diff --git a/typedapi/types/kuromojipartofspeechtokenfilter.go b/typedapi/types/kuromojipartofspeechtokenfilter.go index f6c5914541..a5bfc9c50f 100644 --- a/typedapi/types/kuromojipartofspeechtokenfilter.go +++ b/typedapi/types/kuromojipartofspeechtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // KuromojiPartOfSpeechTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 type KuromojiPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewKuromojiPartOfSpeechTokenFilter() *KuromojiPartOfSpeechTokenFilter { return r } + +// true + +type KuromojiPartOfSpeechTokenFilterVariant interface { + KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter +} + +func (s *KuromojiPartOfSpeechTokenFilter) KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter { + return s +} diff --git a/typedapi/types/kuromojireadingformtokenfilter.go b/typedapi/types/kuromojireadingformtokenfilter.go index f8b514e0f8..4855a4dc62 100644 --- a/typedapi/types/kuromojireadingformtokenfilter.go +++ b/typedapi/types/kuromojireadingformtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KuromojiReadingFormTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 type KuromojiReadingFormTokenFilter struct { Type string `json:"type,omitempty"` UseRomaji bool `json:"use_romaji"` @@ -102,3 +102,13 @@ func NewKuromojiReadingFormTokenFilter() *KuromojiReadingFormTokenFilter { return r } + +// true + +type KuromojiReadingFormTokenFilterVariant interface { + KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter +} + +func (s *KuromojiReadingFormTokenFilter) KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter { + return s +} diff --git a/typedapi/types/kuromojistemmertokenfilter.go b/typedapi/types/kuromojistemmertokenfilter.go index 4fc5ac99ee..5207c29d88 100644 --- a/typedapi/types/kuromojistemmertokenfilter.go +++ b/typedapi/types/kuromojistemmertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // KuromojiStemmerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 type KuromojiStemmerTokenFilter struct { MinimumLength int `json:"minimum_length"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewKuromojiStemmerTokenFilter() *KuromojiStemmerTokenFilter { return r } + +// true + +type KuromojiStemmerTokenFilterVariant interface { + KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter +} + +func (s *KuromojiStemmerTokenFilter) KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter { + return s +} diff --git a/typedapi/types/kuromojitokenizer.go b/typedapi/types/kuromojitokenizer.go index 746133ae67..848ccd5cbb 100644 --- a/typedapi/types/kuromojitokenizer.go +++ b/typedapi/types/kuromojitokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // KuromojiTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 type KuromojiTokenizer struct { DiscardCompoundToken *bool `json:"discard_compound_token,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -180,3 +180,13 @@ func NewKuromojiTokenizer() *KuromojiTokenizer { return r } + +// true + +type KuromojiTokenizerVariant interface { + KuromojiTokenizerCaster() *KuromojiTokenizer +} + +func (s *KuromojiTokenizer) KuromojiTokenizerCaster() *KuromojiTokenizer { + return s +} diff --git a/typedapi/types/languageanalyzer.go b/typedapi/types/languageanalyzer.go deleted file mode 100644 index ee156239e5..0000000000 --- a/typedapi/types/languageanalyzer.go +++ /dev/null @@ -1,136 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language" -) - -// LanguageAnalyzer type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L52-L59 -type LanguageAnalyzer struct { - Language language.Language `json:"language"` - StemExclusion []string `json:"stem_exclusion"` - Stopwords []string `json:"stopwords,omitempty"` - StopwordsPath *string `json:"stopwords_path,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` -} - -func (s *LanguageAnalyzer) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "language": - if err := dec.Decode(&s.Language); err != nil { - return fmt.Errorf("%s | %w", "Language", err) - } - - case "stem_exclusion": - if err := dec.Decode(&s.StemExclusion); err != nil { - return fmt.Errorf("%s | %w", "StemExclusion", err) - } - - case "stopwords": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Stopwords", err) - } - - s.Stopwords = append(s.Stopwords, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { - return fmt.Errorf("%s | %w", "Stopwords", err) - } - } - - case "stopwords_path": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "StopwordsPath", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = 
string(tmp[:]) - } - s.StopwordsPath = &o - - case "type": - if err := dec.Decode(&s.Type); err != nil { - return fmt.Errorf("%s | %w", "Type", err) - } - - case "version": - if err := dec.Decode(&s.Version); err != nil { - return fmt.Errorf("%s | %w", "Version", err) - } - - } - } - return nil -} - -// MarshalJSON override marshalling to include literal value -func (s LanguageAnalyzer) MarshalJSON() ([]byte, error) { - type innerLanguageAnalyzer LanguageAnalyzer - tmp := innerLanguageAnalyzer{ - Language: s.Language, - StemExclusion: s.StemExclusion, - Stopwords: s.Stopwords, - StopwordsPath: s.StopwordsPath, - Type: s.Type, - Version: s.Version, - } - - tmp.Type = "language" - - return json.Marshal(tmp) -} - -// NewLanguageAnalyzer returns a LanguageAnalyzer. -func NewLanguageAnalyzer() *LanguageAnalyzer { - r := &LanguageAnalyzer{} - - return r -} diff --git a/typedapi/types/languagecontext.go b/typedapi/types/languagecontext.go index 2df25c3639..a75aa13af8 100644 --- a/typedapi/types/languagecontext.go +++ b/typedapi/types/languagecontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // LanguageContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/get_script_languages/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/get_script_languages/types.ts#L22-L25 type LanguageContext struct { Contexts []string `json:"contexts"` Language scriptlanguage.ScriptLanguage `json:"language"` @@ -38,3 +38,5 @@ func NewLanguageContext() *LanguageContext { return r } + +// false diff --git a/typedapi/types/laplacesmoothingmodel.go b/typedapi/types/laplacesmoothingmodel.go index 258d97ea21..5bc23adfb8 100644 --- a/typedapi/types/laplacesmoothingmodel.go +++ b/typedapi/types/laplacesmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LaplaceSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L430-L435 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L430-L435 type LaplaceSmoothingModel struct { // Alpha A constant that is added to all counts to balance weights. 
Alpha Float64 `json:"alpha"` @@ -79,3 +79,13 @@ func NewLaplaceSmoothingModel() *LaplaceSmoothingModel { return r } + +// true + +type LaplaceSmoothingModelVariant interface { + LaplaceSmoothingModelCaster() *LaplaceSmoothingModel +} + +func (s *LaplaceSmoothingModel) LaplaceSmoothingModelCaster() *LaplaceSmoothingModel { + return s +} diff --git a/typedapi/types/latest.go b/typedapi/types/latest.go index 8e7f70933d..9396cfe374 100644 --- a/typedapi/types/latest.go +++ b/typedapi/types/latest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Latest type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L47-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L47-L52 type Latest struct { // Sort Specifies the date field that is used to identify the latest documents. Sort string `json:"sort"` @@ -74,3 +74,13 @@ func NewLatest() *Latest { return r } + +// true + +type LatestVariant interface { + LatestCaster() *Latest +} + +func (s *Latest) LatestCaster() *Latest { + return s +} diff --git a/typedapi/types/latlongeolocation.go b/typedapi/types/latlongeolocation.go index 8cd3ddbec9..e54930248a 100644 --- a/typedapi/types/latlongeolocation.go +++ b/typedapi/types/latlongeolocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LatLonGeoLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L120-L129 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L120-L129 type LatLonGeoLocation struct { // Lat Latitude Lat Float64 `json:"lat"` @@ -97,3 +97,13 @@ func NewLatLonGeoLocation() *LatLonGeoLocation { return r } + +// true + +type LatLonGeoLocationVariant interface { + LatLonGeoLocationCaster() *LatLonGeoLocation +} + +func (s *LatLonGeoLocation) LatLonGeoLocationCaster() *LatLonGeoLocation { + return s +} diff --git a/typedapi/types/latviananalyzer.go b/typedapi/types/latviananalyzer.go index cdcd3072a7..6008878d9a 100644 --- a/typedapi/types/latviananalyzer.go +++ b/typedapi/types/latviananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LatvianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L223-L228 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L234-L239 type LatvianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewLatvianAnalyzer() *LatvianAnalyzer { return r } + +// true + +type LatvianAnalyzerVariant interface { + LatvianAnalyzerCaster() *LatvianAnalyzer +} + +func (s *LatvianAnalyzer) LatvianAnalyzerCaster() *LatvianAnalyzer { + return s +} diff --git a/typedapi/types/learningtorank.go b/typedapi/types/learningtorank.go index 8049b38ed1..e43abc9844 100644 --- a/typedapi/types/learningtorank.go +++ b/typedapi/types/learningtorank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LearningToRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/rescoring.ts#L88-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/rescoring.ts#L88-L97 type LearningToRank struct { // ModelId The unique identifier of the trained model uploaded to Elasticsearch ModelId string `json:"model_id"` @@ -82,8 +82,18 @@ func (s *LearningToRank) UnmarshalJSON(data []byte) error { // NewLearningToRank returns a LearningToRank. 
func NewLearningToRank() *LearningToRank { r := &LearningToRank{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type LearningToRankVariant interface { + LearningToRankCaster() *LearningToRank +} + +func (s *LearningToRank) LearningToRankCaster() *LearningToRank { + return s +} diff --git a/typedapi/types/lengthtokenfilter.go b/typedapi/types/lengthtokenfilter.go index bd39764154..826419655c 100644 --- a/typedapi/types/lengthtokenfilter.go +++ b/typedapi/types/lengthtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LengthTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L244-L248 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L244-L248 type LengthTokenFilter struct { Max *int `json:"max,omitempty"` Min *int `json:"min,omitempty"` @@ -122,3 +122,13 @@ func NewLengthTokenFilter() *LengthTokenFilter { return r } + +// true + +type LengthTokenFilterVariant interface { + LengthTokenFilterCaster() *LengthTokenFilter +} + +func (s *LengthTokenFilter) LengthTokenFilterCaster() *LengthTokenFilter { + return s +} diff --git a/typedapi/types/lessthanvalidation.go b/typedapi/types/lessthanvalidation.go index c090733a1f..b726259a61 100644 --- a/typedapi/types/lessthanvalidation.go +++ b/typedapi/types/lessthanvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LessThanValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L58-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L58-L61 type LessThanValidation struct { Constraint Float64 `json:"constraint"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewLessThanValidation() *LessThanValidation { return r } + +// true + +type LessThanValidationVariant interface { + LessThanValidationCaster() *LessThanValidation +} + +func (s *LessThanValidation) LessThanValidationCaster() *LessThanValidation { + return s +} diff --git a/typedapi/types/lettertokenizer.go b/typedapi/types/lettertokenizer.go index 67e97a8123..403d986af5 100644 --- a/typedapi/types/lettertokenizer.go +++ b/typedapi/types/lettertokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // LetterTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L76-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L76-L78 type LetterTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewLetterTokenizer() *LetterTokenizer { return r } + +// true + +type LetterTokenizerVariant interface { + LetterTokenizerCaster() *LetterTokenizer +} + +func (s *LetterTokenizer) LetterTokenizerCaster() *LetterTokenizer { + return s +} diff --git a/typedapi/types/license.go b/typedapi/types/license.go index 2b06831429..07c5ae3011 100644 --- a/typedapi/types/license.go +++ b/typedapi/types/license.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // License type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/_types/License.ts#L42-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/_types/License.ts#L42-L53 type License struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` IssueDateInMillis int64 `json:"issue_date_in_millis"` @@ -161,3 +161,13 @@ func NewLicense() *License { return r } + +// true + +type LicenseVariant interface { + LicenseCaster() *License +} + +func (s *License) LicenseCaster() *License { + return s +} diff --git a/typedapi/types/licenseinformation.go b/typedapi/types/licenseinformation.go index 665c92d4bf..a62d98bffa 100644 --- a/typedapi/types/licenseinformation.go +++ b/typedapi/types/licenseinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // LicenseInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/license/get/types.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/license/get/types.ts#L25-L38 type LicenseInformation struct { ExpiryDate DateTime `json:"expiry_date,omitempty"` ExpiryDateInMillis *int64 `json:"expiry_date_in_millis,omitempty"` @@ -150,3 +150,5 @@ func NewLicenseInformation() *LicenseInformation { return r } + +// false diff --git a/typedapi/types/lifecycle.go b/typedapi/types/lifecycle.go index 161dc87d45..6754255104 100644 --- a/typedapi/types/lifecycle.go +++ b/typedapi/types/lifecycle.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Lifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/get_lifecycle/types.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/get_lifecycle/types.ts#L24-L28 type Lifecycle struct { ModifiedDate DateTime `json:"modified_date"` Policy IlmPolicy `json:"policy"` @@ -78,3 +78,5 @@ func NewLifecycle() *Lifecycle { return r } + +// false diff --git a/typedapi/types/lifecycleexplain.go b/typedapi/types/lifecycleexplain.go index 9b88093b47..03987f07b4 100644 --- a/typedapi/types/lifecycleexplain.go +++ b/typedapi/types/lifecycleexplain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // LifecycleExplainManaged // LifecycleExplainUnmanaged // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/explain_lifecycle/types.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/explain_lifecycle/types.ts#L64-L67 type LifecycleExplain any diff --git a/typedapi/types/lifecycleexplainmanaged.go b/typedapi/types/lifecycleexplainmanaged.go index e995aa9943..b0754ef8ae 100644 --- a/typedapi/types/lifecycleexplainmanaged.go +++ b/typedapi/types/lifecycleexplainmanaged.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LifecycleExplainManaged type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/explain_lifecycle/types.ts#L26-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/explain_lifecycle/types.ts#L27-L57 type LifecycleExplainManaged struct { Action *string `json:"action,omitempty"` ActionTime DateTime `json:"action_time,omitempty"` @@ -39,7 +39,7 @@ type LifecycleExplainManaged struct { Age Duration `json:"age,omitempty"` FailedStep *string `json:"failed_step,omitempty"` FailedStepRetryCount *int `json:"failed_step_retry_count,omitempty"` - Index *string `json:"index,omitempty"` + Index string `json:"index"` IndexCreationDate DateTime `json:"index_creation_date,omitempty"` IndexCreationDateMillis *int64 `json:"index_creation_date_millis,omitempty"` IsAutoRetryableError *bool `json:"is_auto_retryable_error,omitempty"` @@ -50,7 +50,11 @@ type LifecycleExplainManaged struct { PhaseExecution *LifecycleExplainPhaseExecution `json:"phase_execution,omitempty"` PhaseTime DateTime `json:"phase_time,omitempty"` PhaseTimeMillis *int64 `json:"phase_time_millis,omitempty"` - Policy string `json:"policy"` + Policy *string `json:"policy,omitempty"` + PreviousStepInfo map[string]json.RawMessage `json:"previous_step_info,omitempty"` + RepositoryName *string `json:"repository_name,omitempty"` + ShrinkIndexName *string `json:"shrink_index_name,omitempty"` + SnapshotName *string `json:"snapshot_name,omitempty"` Step *string `json:"step,omitempty"` StepInfo map[string]json.RawMessage `json:"step_info,omitempty"` StepTime DateTime `json:"step_time,omitempty"` @@ -183,6 +187,50 @@ func (s *LifecycleExplainManaged) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Policy", err) } + case "previous_step_info": + if s.PreviousStepInfo == nil { + s.PreviousStepInfo = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.PreviousStepInfo); err != nil { + 
return fmt.Errorf("%s | %w", "PreviousStepInfo", err) + } + + case "repository_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RepositoryName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RepositoryName = &o + + case "shrink_index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ShrinkIndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ShrinkIndexName = &o + + case "snapshot_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SnapshotName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SnapshotName = &o + case "step": if err := dec.Decode(&s.Step); err != nil { return fmt.Errorf("%s | %w", "Step", err) @@ -238,6 +286,10 @@ func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { PhaseTime: s.PhaseTime, PhaseTimeMillis: s.PhaseTimeMillis, Policy: s.Policy, + PreviousStepInfo: s.PreviousStepInfo, + RepositoryName: s.RepositoryName, + ShrinkIndexName: s.ShrinkIndexName, + SnapshotName: s.SnapshotName, Step: s.Step, StepInfo: s.StepInfo, StepTime: s.StepTime, @@ -253,8 +305,11 @@ func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { // NewLifecycleExplainManaged returns a LifecycleExplainManaged. 
func NewLifecycleExplainManaged() *LifecycleExplainManaged { r := &LifecycleExplainManaged{ - StepInfo: make(map[string]json.RawMessage, 0), + PreviousStepInfo: make(map[string]json.RawMessage), + StepInfo: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/lifecycleexplainphaseexecution.go b/typedapi/types/lifecycleexplainphaseexecution.go index e2ce329c39..052a5292fc 100644 --- a/typedapi/types/lifecycleexplainphaseexecution.go +++ b/typedapi/types/lifecycleexplainphaseexecution.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,9 +30,10 @@ import ( // LifecycleExplainPhaseExecution type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/explain_lifecycle/types.ts#L64-L68 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/explain_lifecycle/types.ts#L69-L74 type LifecycleExplainPhaseExecution struct { ModifiedDateInMillis int64 `json:"modified_date_in_millis"` + PhaseDefinition *Phase `json:"phase_definition,omitempty"` Policy string `json:"policy"` Version int64 `json:"version"` } @@ -57,6 +58,11 @@ func (s *LifecycleExplainPhaseExecution) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModifiedDateInMillis", err) } + case "phase_definition": + if err := dec.Decode(&s.PhaseDefinition); err != nil { + return fmt.Errorf("%s | %w", "PhaseDefinition", err) + } + case "policy": if err := dec.Decode(&s.Policy); err != nil { return fmt.Errorf("%s | %w", "Policy", err) @@ -78,3 +84,5 @@ func NewLifecycleExplainPhaseExecution() *LifecycleExplainPhaseExecution { return r } + +// false diff 
--git a/typedapi/types/lifecycleexplainunmanaged.go b/typedapi/types/lifecycleexplainunmanaged.go index 38f8f91008..24e252adbb 100644 --- a/typedapi/types/lifecycleexplainunmanaged.go +++ b/typedapi/types/lifecycleexplainunmanaged.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // LifecycleExplainUnmanaged type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/explain_lifecycle/types.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/explain_lifecycle/types.ts#L59-L62 type LifecycleExplainUnmanaged struct { Index string `json:"index"` Managed bool `json:"managed,omitempty"` @@ -85,3 +85,5 @@ func NewLifecycleExplainUnmanaged() *LifecycleExplainUnmanaged { return r } + +// false diff --git a/typedapi/types/like.go b/typedapi/types/like.go index b1a97b1e9f..96a14d4eee 100644 --- a/typedapi/types/like.go +++ b/typedapi/types/like.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // LikeDocument // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L198-L203 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L198-L203 type Like any + +type LikeVariant interface { + LikeCaster() *Like +} diff --git a/typedapi/types/likedocument.go b/typedapi/types/likedocument.go index b4c2c4d852..9045d31a6e 100644 --- a/typedapi/types/likedocument.go +++ b/typedapi/types/likedocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // LikeDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L174-L196 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L174-L196 type LikeDocument struct { // Doc A document not present in the index. Doc json.RawMessage `json:"doc,omitempty"` @@ -114,8 +114,18 @@ func (s *LikeDocument) UnmarshalJSON(data []byte) error { // NewLikeDocument returns a LikeDocument. 
func NewLikeDocument() *LikeDocument { r := &LikeDocument{ - PerFieldAnalyzer: make(map[string]string, 0), + PerFieldAnalyzer: make(map[string]string), } return r } + +// true + +type LikeDocumentVariant interface { + LikeDocumentCaster() *LikeDocument +} + +func (s *LikeDocument) LikeDocumentCaster() *LikeDocument { + return s +} diff --git a/typedapi/types/limits.go b/typedapi/types/limits.go index 87788fb3cd..896be6b685 100644 --- a/typedapi/types/limits.go +++ b/typedapi/types/limits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,13 @@ import ( // Limits type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/types.ts#L34-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/types.ts#L34-L40 type Limits struct { - EffectiveMaxModelMemoryLimit string `json:"effective_max_model_memory_limit"` - MaxModelMemoryLimit *string `json:"max_model_memory_limit,omitempty"` - TotalMlMemory string `json:"total_ml_memory"` + EffectiveMaxModelMemoryLimit ByteSize `json:"effective_max_model_memory_limit,omitempty"` + MaxModelMemoryLimit ByteSize `json:"max_model_memory_limit,omitempty"` + MaxSingleMlNodeProcessors *int `json:"max_single_ml_node_processors,omitempty"` + TotalMlMemory ByteSize `json:"total_ml_memory"` + TotalMlProcessors *int `json:"total_ml_processors,omitempty"` } func (s *Limits) UnmarshalJSON(data []byte) error { @@ -54,40 +56,51 @@ func (s *Limits) UnmarshalJSON(data []byte) error { switch t { case "effective_max_model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if 
err := dec.Decode(&s.EffectiveMaxModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "EffectiveMaxModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.EffectiveMaxModelMemoryLimit = o case "max_model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "MaxModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "max_single_ml_node_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSingleMlNodeProcessors", err) + } + s.MaxSingleMlNodeProcessors = &value + case float64: + f := int(v) + s.MaxSingleMlNodeProcessors = &f } - s.MaxModelMemoryLimit = &o case "total_ml_memory": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.TotalMlMemory); err != nil { return fmt.Errorf("%s | %w", "TotalMlMemory", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "total_ml_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalMlProcessors", err) + } + s.TotalMlProcessors = &value + case float64: + f := int(v) + s.TotalMlProcessors = &f } - s.TotalMlMemory = o } } @@ -100,3 +113,5 @@ func NewLimits() *Limits { return r } + +// false diff --git a/typedapi/types/limittokencounttokenfilter.go b/typedapi/types/limittokencounttokenfilter.go index 9bc6a1928b..f87fb214a5 100644 --- a/typedapi/types/limittokencounttokenfilter.go +++ b/typedapi/types/limittokencounttokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LimitTokenCountTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L250-L254 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L250-L254 type LimitTokenCountTokenFilter struct { ConsumeAllTokens *bool `json:"consume_all_tokens,omitempty"` MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` @@ -109,3 +109,13 @@ func NewLimitTokenCountTokenFilter() *LimitTokenCountTokenFilter { return r } + +// true + +type LimitTokenCountTokenFilterVariant interface { + LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter +} + +func (s *LimitTokenCountTokenFilter) LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter { + return s +} diff --git a/typedapi/types/linearinterpolationsmoothingmodel.go b/typedapi/types/linearinterpolationsmoothingmodel.go index 29261b0c55..72264372e9 100644 --- a/typedapi/types/linearinterpolationsmoothingmodel.go +++ b/typedapi/types/linearinterpolationsmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LinearInterpolationSmoothingModel type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L437-L441 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L437-L441 type LinearInterpolationSmoothingModel struct { BigramLambda Float64 `json:"bigram_lambda"` TrigramLambda Float64 `json:"trigram_lambda"` @@ -112,3 +112,13 @@ func NewLinearInterpolationSmoothingModel() *LinearInterpolationSmoothingModel { return r } + +// true + +type LinearInterpolationSmoothingModelVariant interface { + LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel +} + +func (s *LinearInterpolationSmoothingModel) LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel { + return s +} diff --git a/typedapi/types/linearmovingaverageaggregation.go b/typedapi/types/linearmovingaverageaggregation.go index 14292f025f..c2afefe709 100644 --- a/typedapi/types/linearmovingaverageaggregation.go +++ b/typedapi/types/linearmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // LinearMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L268-L271 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L268-L271 type LinearMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewLinearMovingAverageAggregation() *LinearMovingAverageAggregation { return r } + +// true + +type LinearMovingAverageAggregationVariant interface { + LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation +} + +func (s *LinearMovingAverageAggregation) LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation { + return s +} diff --git a/typedapi/types/listtypevalidation.go b/typedapi/types/listtypevalidation.go index 82a8b6469e..dac72f45b8 100644 --- a/typedapi/types/listtypevalidation.go +++ b/typedapi/types/listtypevalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ListTypeValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L68-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L68-L71 type ListTypeValidation struct { Constraint string `json:"constraint"` Type string `json:"type,omitempty"` @@ -93,3 +93,13 @@ func NewListTypeValidation() *ListTypeValidation { return r } + +// true + +type ListTypeValidationVariant interface { + ListTypeValidationCaster() *ListTypeValidation +} + +func (s *ListTypeValidation) ListTypeValidationCaster() *ListTypeValidation { + return s +} diff --git a/typedapi/types/lithuaniananalyzer.go b/typedapi/types/lithuaniananalyzer.go index ed24fb7d7b..31570e7a31 100644 --- a/typedapi/types/lithuaniananalyzer.go +++ b/typedapi/types/lithuaniananalyzer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LithuanianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L230-L235 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L241-L246 type LithuanianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewLithuanianAnalyzer() *LithuanianAnalyzer { return r } + +// true + +type LithuanianAnalyzerVariant interface { + LithuanianAnalyzerCaster() *LithuanianAnalyzer +} + +func (s *LithuanianAnalyzer) LithuanianAnalyzerCaster() *LithuanianAnalyzer { + return s +} diff --git a/typedapi/types/helprecord.go b/typedapi/types/local.go similarity index 70% rename from typedapi/types/helprecord.go rename to typedapi/types/local.go index 8d5f11d536..cf3d978af2 100644 --- a/typedapi/types/helprecord.go +++ b/typedapi/types/local.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,14 +29,14 @@ import ( "strconv" ) -// HelpRecord type. +// Local type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/help/types.ts#L20-L22 -type HelpRecord struct { - Endpoint string `json:"endpoint"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L63-L65 +type Local struct { + Type string `json:"type"` } -func (s *HelpRecord) UnmarshalJSON(data []byte) error { +func (s *Local) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,26 +51,28 @@ func (s *HelpRecord) UnmarshalJSON(data []byte) error { switch t { - case "endpoint": + case "type": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Endpoint", err) + return fmt.Errorf("%s | %w", "Type", err) } o := string(tmp[:]) o, err = strconv.Unquote(o) if err != nil { o = string(tmp[:]) } - s.Endpoint = o + s.Type = o } } return nil } -// NewHelpRecord returns a HelpRecord. -func NewHelpRecord() *HelpRecord { - r := &HelpRecord{} +// NewLocal returns a Local. +func NewLocal() *Local { + r := &Local{} return r } + +// false diff --git a/typedapi/types/loggingaction.go b/typedapi/types/loggingaction.go index b9689375f0..38b4cd378c 100644 --- a/typedapi/types/loggingaction.go +++ b/typedapi/types/loggingaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LoggingAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L281-L285 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L281-L285 type LoggingAction struct { Category *string `json:"category,omitempty"` Level *string `json:"level,omitempty"` @@ -100,3 +100,13 @@ func NewLoggingAction() *LoggingAction { return r } + +// true + +type LoggingActionVariant interface { + LoggingActionCaster() *LoggingAction +} + +func (s *LoggingAction) LoggingActionCaster() *LoggingAction { + return s +} diff --git a/typedapi/types/loggingresult.go b/typedapi/types/loggingresult.go index 1fa82f127f..f700357192 100644 --- a/typedapi/types/loggingresult.go +++ b/typedapi/types/loggingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LoggingResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L287-L289 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L287-L289 type LoggingResult struct { LoggedText string `json:"logged_text"` } @@ -74,3 +74,5 @@ func NewLoggingResult() *LoggingResult { return r } + +// false diff --git a/typedapi/types/logstashpipeline.go b/typedapi/types/logstashpipeline.go index c452f86e26..565df5c7d9 100644 --- a/typedapi/types/logstashpipeline.go +++ b/typedapi/types/logstashpipeline.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,24 +31,23 @@ import ( // LogstashPipeline type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/logstash/_types/Pipeline.ts#L60-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/logstash/_types/Pipeline.ts#L60-L91 type LogstashPipeline struct { - // Description Description of the pipeline. + // Description A description of the pipeline. // This description is not used by Elasticsearch or Logstash. Description string `json:"description"` - // LastModified Date the pipeline was last updated. - // Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. + // LastModified The date the pipeline was last updated. + // It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. LastModified DateTime `json:"last_modified"` - // Pipeline Configuration for the pipeline. + // Pipeline The configuration for the pipeline. Pipeline string `json:"pipeline"` - // PipelineMetadata Optional metadata about the pipeline. - // May have any contents. + // PipelineMetadata Optional metadata about the pipeline, which can have any contents. // This metadata is not generated or used by Elasticsearch or Logstash. PipelineMetadata PipelineMetadata `json:"pipeline_metadata"` // PipelineSettings Settings for the pipeline. - // Supports only flat keys in dot notation. + // It supports only flat keys in dot notation. PipelineSettings PipelineSettings `json:"pipeline_settings"` - // Username User who last updated the pipeline. + // Username The user who last updated the pipeline. 
Username string `json:"username"` } @@ -129,3 +128,13 @@ func NewLogstashPipeline() *LogstashPipeline { return r } + +// true + +type LogstashPipelineVariant interface { + LogstashPipelineCaster() *LogstashPipeline +} + +func (s *LogstashPipeline) LogstashPipelineCaster() *LogstashPipeline { + return s +} diff --git a/typedapi/types/longnumberproperty.go b/typedapi/types/longnumberproperty.go index 94603875cd..d9eb6ab18f 100644 --- a/typedapi/types/longnumberproperty.go +++ b/typedapi/types/longnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // LongNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L162-L165 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L166-L169 type LongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type LongNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *int64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) 
} s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -552,301 +566,313 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := 
NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -871,6 +897,11 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -919,6 +950,7 @@ func (s LongNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -932,10 +964,20 @@ func (s LongNumberProperty) MarshalJSON() ([]byte, error) { // NewLongNumberProperty returns a LongNumberProperty. 
func NewLongNumberProperty() *LongNumberProperty { r := &LongNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type LongNumberPropertyVariant interface { + LongNumberPropertyCaster() *LongNumberProperty +} + +func (s *LongNumberProperty) LongNumberPropertyCaster() *LongNumberProperty { + return s +} diff --git a/typedapi/types/longrangeproperty.go b/typedapi/types/longrangeproperty.go index 9489a4e083..9423064696 100644 --- a/typedapi/types/longrangeproperty.go +++ b/typedapi/types/longrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // LongRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/range.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/range.ts#L50-L52 type LongRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,10 +45,11 @@ type LongRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { @@ -149,301 +151,313 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -506,301 +520,313 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -820,6 +846,11 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -834,18 +865,19 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { func (s LongRangeProperty) MarshalJSON() ([]byte, error) { type innerLongRangeProperty LongRangeProperty tmp := innerLongRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "long_range" @@ -856,10 +888,20 @@ func (s LongRangeProperty) MarshalJSON() ([]byte, error) { // NewLongRangeProperty returns a LongRangeProperty. 
func NewLongRangeProperty() *LongRangeProperty { r := &LongRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type LongRangePropertyVariant interface { + LongRangePropertyCaster() *LongRangeProperty +} + +func (s *LongRangeProperty) LongRangePropertyCaster() *LongRangeProperty { + return s +} diff --git a/typedapi/types/longraretermsaggregate.go b/typedapi/types/longraretermsaggregate.go index 49db50ed4d..34fe1caa12 100644 --- a/typedapi/types/longraretermsaggregate.go +++ b/typedapi/types/longraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // LongRareTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L471-L476 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L471-L476 type LongRareTermsAggregate struct { Buckets BucketsLongRareTermsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewLongRareTermsAggregate() *LongRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/longraretermsbucket.go b/typedapi/types/longraretermsbucket.go index 61b6e9156f..772d880026 100644 --- a/typedapi/types/longraretermsbucket.go +++ b/typedapi/types/longraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // LongRareTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L478-L481 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L478-L481 type LongRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -654,8 +654,10 @@ func (s LongRareTermsBucket) MarshalJSON() ([]byte, error) { // NewLongRareTermsBucket returns a LongRareTermsBucket. func NewLongRareTermsBucket() *LongRareTermsBucket { r := &LongRareTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/longtermsaggregate.go b/typedapi/types/longtermsaggregate.go index 98886a907d..b004098b1d 100644 --- a/typedapi/types/longtermsaggregate.go +++ b/typedapi/types/longtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L439-L444 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L439-L444 type LongTermsAggregate struct { Buckets BucketsLongTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewLongTermsAggregate() *LongTermsAggregate { return r } + +// false diff --git a/typedapi/types/longtermsbucket.go b/typedapi/types/longtermsbucket.go index cc27314a20..177650e80a 100644 --- a/typedapi/types/longtermsbucket.go +++ b/typedapi/types/longtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // LongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L446-L449 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L446-L449 type LongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -670,8 +670,10 @@ func (s LongTermsBucket) MarshalJSON() ([]byte, error) { // NewLongTermsBucket returns a LongTermsBucket. 
func NewLongTermsBucket() *LongTermsBucket { r := &LongTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/lowercasenormalizer.go b/typedapi/types/lowercasenormalizer.go index be602f4f0a..4f7c89c23f 100644 --- a/typedapi/types/lowercasenormalizer.go +++ b/typedapi/types/lowercasenormalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // LowercaseNormalizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/normalizers.ts#L26-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/normalizers.ts#L26-L28 type LowercaseNormalizer struct { Type string `json:"type,omitempty"` } @@ -49,3 +49,13 @@ func NewLowercaseNormalizer() *LowercaseNormalizer { return r } + +// true + +type LowercaseNormalizerVariant interface { + LowercaseNormalizerCaster() *LowercaseNormalizer +} + +func (s *LowercaseNormalizer) LowercaseNormalizerCaster() *LowercaseNormalizer { + return s +} diff --git a/typedapi/types/lowercaseprocessor.go b/typedapi/types/lowercaseprocessor.go index d507ce4760..ce36b600a3 100644 --- a/typedapi/types/lowercaseprocessor.go +++ b/typedapi/types/lowercaseprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LowercaseProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1172-L1188 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1213-L1229 type LowercaseProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type LowercaseProcessor struct { // Field The field to make lowercase. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -88,16 +88,9 @@ func (s *LowercaseProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewLowercaseProcessor() *LowercaseProcessor { return r } + +// true + +type LowercaseProcessorVariant interface { + LowercaseProcessorCaster() *LowercaseProcessor +} + +func (s *LowercaseProcessor) LowercaseProcessorCaster() *LowercaseProcessor { + return s +} diff --git a/typedapi/types/lowercasetokenfilter.go b/typedapi/types/lowercasetokenfilter.go index 0ae45236b5..66fa86580b 100644 --- a/typedapi/types/lowercasetokenfilter.go +++ b/typedapi/types/lowercasetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // LowercaseTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L256-L259 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L256-L259 type LowercaseTokenFilter struct { Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewLowercaseTokenFilter() *LowercaseTokenFilter { return r } + +// true + +type LowercaseTokenFilterVariant interface { + LowercaseTokenFilterCaster() *LowercaseTokenFilter +} + +func (s *LowercaseTokenFilter) LowercaseTokenFilterCaster() *LowercaseTokenFilter { + return s +} diff --git a/typedapi/types/lowercasetokenizer.go b/typedapi/types/lowercasetokenizer.go index 61e4155980..1a22ef74e6 100644 --- a/typedapi/types/lowercasetokenizer.go +++ b/typedapi/types/lowercasetokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // LowercaseTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L80-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L80-L82 type LowercaseTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewLowercaseTokenizer() *LowercaseTokenizer { return r } + +// true + +type LowercaseTokenizerVariant interface { + LowercaseTokenizerCaster() *LowercaseTokenizer +} + +func (s *LowercaseTokenizer) LowercaseTokenizerCaster() *LowercaseTokenizer { + return s +} diff --git a/typedapi/types/machinelearning.go b/typedapi/types/machinelearning.go index afc7ed40af..adcd22c990 100644 --- a/typedapi/types/machinelearning.go +++ b/typedapi/types/machinelearning.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MachineLearning type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L370-L377 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L378-L385 type MachineLearning struct { Available bool `json:"available"` DataFrameAnalyticsJobs MlDataFrameAnalyticsJobs `json:"data_frame_analytics_jobs"` @@ -137,9 +137,11 @@ func (s *MachineLearning) UnmarshalJSON(data []byte) error { // NewMachineLearning returns a MachineLearning. 
func NewMachineLearning() *MachineLearning { r := &MachineLearning{ - Datafeeds: make(map[string]XpackDatafeed, 0), - Jobs: make(map[string]JobUsage, 0), + Datafeeds: make(map[string]XpackDatafeed), + Jobs: make(map[string]JobUsage), } return r } + +// false diff --git a/typedapi/types/manageuserprivileges.go b/typedapi/types/manageuserprivileges.go index 0bd8fa95af..b8d63818a8 100644 --- a/typedapi/types/manageuserprivileges.go +++ b/typedapi/types/manageuserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ManageUserPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L376-L378 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L438-L440 type ManageUserPrivileges struct { Applications []string `json:"applications"` } @@ -33,3 +33,13 @@ func NewManageUserPrivileges() *ManageUserPrivileges { return r } + +// true + +type ManageUserPrivilegesVariant interface { + ManageUserPrivilegesCaster() *ManageUserPrivileges +} + +func (s *ManageUserPrivileges) ManageUserPrivilegesCaster() *ManageUserPrivileges { + return s +} diff --git a/typedapi/types/mapboxvectortiles.go b/typedapi/types/mapboxvectortiles.go index 53cf5df337..9b747c822d 100644 --- a/typedapi/types/mapboxvectortiles.go +++ b/typedapi/types/mapboxvectortiles.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MapboxVectorTiles type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Binary.ts#L21-L21 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Binary.ts#L21-L21 type MapboxVectorTiles []byte diff --git a/typedapi/types/mappingcharfilter.go b/typedapi/types/mappingcharfilter.go index 545e61b19f..a9fc2fa2af 100644 --- a/typedapi/types/mappingcharfilter.go +++ b/typedapi/types/mappingcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/char_filters.ts#L51-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/char_filters.ts#L51-L55 type MappingCharFilter struct { Mappings []string `json:"mappings,omitempty"` MappingsPath *string `json:"mappings_path,omitempty"` @@ -107,3 +107,13 @@ func NewMappingCharFilter() *MappingCharFilter { return r } + +// true + +type MappingCharFilterVariant interface { + MappingCharFilterCaster() *MappingCharFilter +} + +func (s *MappingCharFilter) MappingCharFilterCaster() *MappingCharFilter { + return s +} diff --git a/typedapi/types/mappinglimitsettings.go b/typedapi/types/mappinglimitsettings.go index c14786060c..ae6579b51e 100644 --- a/typedapi/types/mappinglimitsettings.go +++ b/typedapi/types/mappinglimitsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,16 @@ import ( // MappingLimitSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L411-L424 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L425-L439 type MappingLimitSettings struct { Coerce *bool `json:"coerce,omitempty"` Depth *MappingLimitSettingsDepth `json:"depth,omitempty"` DimensionFields *MappingLimitSettingsDimensionFields `json:"dimension_fields,omitempty"` FieldNameLength *MappingLimitSettingsFieldNameLength `json:"field_name_length,omitempty"` - IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreMalformed string `json:"ignore_malformed,omitempty"` NestedFields *MappingLimitSettingsNestedFields `json:"nested_fields,omitempty"` NestedObjects *MappingLimitSettingsNestedObjects `json:"nested_objects,omitempty"` + Source *MappingLimitSettingsSourceFields `json:"source,omitempty"` TotalFields *MappingLimitSettingsTotalFields `json:"total_fields,omitempty"` } @@ -88,18 +89,16 @@ func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { } case "ignore_malformed": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "IgnoreMalformed", err) - } - s.IgnoreMalformed = &value - case bool: - s.IgnoreMalformed = &v + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreMalformed = o case "nested_fields": if err := dec.Decode(&s.NestedFields); err != nil { @@ -111,6 +110,11 @@ func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NestedObjects", err) } + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", 
"Source", err) + } + case "total_fields": if err := dec.Decode(&s.TotalFields); err != nil { return fmt.Errorf("%s | %w", "TotalFields", err) @@ -127,3 +131,13 @@ func NewMappingLimitSettings() *MappingLimitSettings { return r } + +// true + +type MappingLimitSettingsVariant interface { + MappingLimitSettingsCaster() *MappingLimitSettings +} + +func (s *MappingLimitSettings) MappingLimitSettingsCaster() *MappingLimitSettings { + return s +} diff --git a/typedapi/types/mappinglimitsettingsdepth.go b/typedapi/types/mappinglimitsettingsdepth.go index add9a9670b..a28020acae 100644 --- a/typedapi/types/mappinglimitsettingsdepth.go +++ b/typedapi/types/mappinglimitsettingsdepth.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsDepth type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L445-L452 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L460-L467 type MappingLimitSettingsDepth struct { // Limit The maximum depth for a field, which is measured as the number of inner // objects. 
For instance, if all fields are defined @@ -81,3 +81,13 @@ func NewMappingLimitSettingsDepth() *MappingLimitSettingsDepth { return r } + +// true + +type MappingLimitSettingsDepthVariant interface { + MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth +} + +func (s *MappingLimitSettingsDepth) MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth { + return s +} diff --git a/typedapi/types/mappinglimitsettingsdimensionfields.go b/typedapi/types/mappinglimitsettingsdimensionfields.go index 943a752d84..5046f22d1c 100644 --- a/typedapi/types/mappinglimitsettingsdimensionfields.go +++ b/typedapi/types/mappinglimitsettingsdimensionfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsDimensionFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L482-L488 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L497-L503 type MappingLimitSettingsDimensionFields struct { // Limit [preview] This functionality is in technical preview and may be changed or // removed in a future release. 
@@ -81,3 +81,13 @@ func NewMappingLimitSettingsDimensionFields() *MappingLimitSettingsDimensionFiel return r } + +// true + +type MappingLimitSettingsDimensionFieldsVariant interface { + MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields +} + +func (s *MappingLimitSettingsDimensionFields) MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingsfieldnamelength.go b/typedapi/types/mappinglimitsettingsfieldnamelength.go index 4741da5d12..92b09aee9f 100644 --- a/typedapi/types/mappinglimitsettingsfieldnamelength.go +++ b/typedapi/types/mappinglimitsettingsfieldnamelength.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsFieldNameLength type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L473-L480 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L488-L495 type MappingLimitSettingsFieldNameLength struct { // Limit Setting for the maximum length of a field name. 
This setting isn’t really // something that addresses mappings explosion but @@ -83,3 +83,13 @@ func NewMappingLimitSettingsFieldNameLength() *MappingLimitSettingsFieldNameLeng return r } + +// true + +type MappingLimitSettingsFieldNameLengthVariant interface { + MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength +} + +func (s *MappingLimitSettingsFieldNameLength) MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength { + return s +} diff --git a/typedapi/types/mappinglimitsettingsnestedfields.go b/typedapi/types/mappinglimitsettingsnestedfields.go index 8e45b0b346..bfed761c5b 100644 --- a/typedapi/types/mappinglimitsettingsnestedfields.go +++ b/typedapi/types/mappinglimitsettingsnestedfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsNestedFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L454-L462 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L469-L477 type MappingLimitSettingsNestedFields struct { // Limit The maximum number of distinct nested mappings in an index. 
The nested type // should only be used in special cases, when @@ -82,3 +82,13 @@ func NewMappingLimitSettingsNestedFields() *MappingLimitSettingsNestedFields { return r } + +// true + +type MappingLimitSettingsNestedFieldsVariant interface { + MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields +} + +func (s *MappingLimitSettingsNestedFields) MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingsnestedobjects.go b/typedapi/types/mappinglimitsettingsnestedobjects.go index 5e2154e4d4..c322eaf208 100644 --- a/typedapi/types/mappinglimitsettingsnestedobjects.go +++ b/typedapi/types/mappinglimitsettingsnestedobjects.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsNestedObjects type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L464-L471 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L479-L486 type MappingLimitSettingsNestedObjects struct { // Limit The maximum number of nested JSON objects that a single document can contain // across all nested types. 
This limit helps @@ -81,3 +81,13 @@ func NewMappingLimitSettingsNestedObjects() *MappingLimitSettingsNestedObjects { return r } + +// true + +type MappingLimitSettingsNestedObjectsVariant interface { + MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects +} + +func (s *MappingLimitSettingsNestedObjects) MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects { + return s +} diff --git a/typedapi/types/mappinglimitsettingssourcefields.go b/typedapi/types/mappinglimitsettingssourcefields.go new file mode 100644 index 0000000000..96dc2612a1 --- /dev/null +++ b/typedapi/types/mappinglimitsettingssourcefields.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcemode" +) + +// MappingLimitSettingsSourceFields type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L505-L507 +type MappingLimitSettingsSourceFields struct { + Mode sourcemode.SourceMode `json:"mode"` +} + +// NewMappingLimitSettingsSourceFields returns a MappingLimitSettingsSourceFields. +func NewMappingLimitSettingsSourceFields() *MappingLimitSettingsSourceFields { + r := &MappingLimitSettingsSourceFields{} + + return r +} + +// true + +type MappingLimitSettingsSourceFieldsVariant interface { + MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields +} + +func (s *MappingLimitSettingsSourceFields) MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingstotalfields.go b/typedapi/types/mappinglimitsettingstotalfields.go index 20a95ca92b..c2d5047810 100644 --- a/typedapi/types/mappinglimitsettingstotalfields.go +++ b/typedapi/types/mappinglimitsettingstotalfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsTotalFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L426-L443 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L441-L458 type MappingLimitSettingsTotalFields struct { // IgnoreDynamicBeyondLimit This setting determines what happens when a dynamically mapped field would // exceed the total fields limit. 
When set @@ -43,14 +43,14 @@ type MappingLimitSettingsTotalFields struct { // similar to dynamic: false. // The fields that were not added to the mapping will be added to the _ignored // field. - IgnoreDynamicBeyondLimit *bool `json:"ignore_dynamic_beyond_limit,omitempty"` + IgnoreDynamicBeyondLimit string `json:"ignore_dynamic_beyond_limit,omitempty"` // Limit The maximum number of fields in an index. Field and object mappings, as well // as field aliases count towards this limit. // The limit is in place to prevent mappings and searches from becoming too // large. Higher values can lead to performance // degradations and memory issues, especially in clusters with a high load or // few resources. - Limit *int64 `json:"limit,omitempty"` + Limit string `json:"limit,omitempty"` } func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { @@ -69,33 +69,28 @@ func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { switch t { case "ignore_dynamic_beyond_limit": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "IgnoreDynamicBeyondLimit", err) - } - s.IgnoreDynamicBeyondLimit = &value - case bool: - s.IgnoreDynamicBeyondLimit = &v + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreDynamicBeyondLimit", err) } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreDynamicBeyondLimit = o case "limit": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Limit", err) - } - s.Limit = &value - case float64: - f := int64(v) - s.Limit = &f + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if 
err != nil { + o = string(tmp[:]) } + s.Limit = o } } @@ -108,3 +103,13 @@ func NewMappingLimitSettingsTotalFields() *MappingLimitSettingsTotalFields { return r } + +// true + +type MappingLimitSettingsTotalFieldsVariant interface { + MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields +} + +func (s *MappingLimitSettingsTotalFields) MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields { + return s +} diff --git a/typedapi/types/mappingstats.go b/typedapi/types/mappingstats.go index edcf8f7cd8..79ff31bc92 100644 --- a/typedapi/types/mappingstats.go +++ b/typedapi/types/mappingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MappingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L186-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L186-L190 type MappingStats struct { TotalCount int64 `json:"total_count"` TotalEstimatedOverhead ByteSize `json:"total_estimated_overhead,omitempty"` @@ -99,3 +99,5 @@ func NewMappingStats() *MappingStats { return r } + +// false diff --git a/typedapi/types/masterisstableindicator.go b/typedapi/types/masterisstableindicator.go index 4c3be27822..018415c039 100644 --- a/typedapi/types/masterisstableindicator.go +++ b/typedapi/types/masterisstableindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MasterIsStableIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L80-L84 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L81-L85 type MasterIsStableIndicator struct { Details *MasterIsStableIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewMasterIsStableIndicator() *MasterIsStableIndicator { return r } + +// false diff --git a/typedapi/types/masterisstableindicatorclusterformationnode.go b/typedapi/types/masterisstableindicatorclusterformationnode.go index bd22161d1b..b15dd580dc 100644 --- a/typedapi/types/masterisstableindicatorclusterformationnode.go +++ b/typedapi/types/masterisstableindicatorclusterformationnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MasterIsStableIndicatorClusterFormationNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L99-L103 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L100-L104 type MasterIsStableIndicatorClusterFormationNode struct { ClusterFormationMessage string `json:"cluster_formation_message"` Name *string `json:"name,omitempty"` @@ -100,3 +100,5 @@ func NewMasterIsStableIndicatorClusterFormationNode() *MasterIsStableIndicatorCl return r } + +// false diff --git a/typedapi/types/masterisstableindicatordetails.go b/typedapi/types/masterisstableindicatordetails.go index e5cb24fb9e..ef6bff9814 100644 --- a/typedapi/types/masterisstableindicatordetails.go +++ b/typedapi/types/masterisstableindicatordetails.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MasterIsStableIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L85-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L86-L91 type MasterIsStableIndicatorDetails struct { ClusterFormation []MasterIsStableIndicatorClusterFormationNode `json:"cluster_formation,omitempty"` CurrentMaster IndicatorNode `json:"current_master"` @@ -36,3 +36,5 @@ func NewMasterIsStableIndicatorDetails() *MasterIsStableIndicatorDetails { return r } + +// false diff --git a/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go b/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go index b1710e5df2..cd7a00c13a 100644 --- a/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go +++ b/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MasterIsStableIndicatorExceptionFetchingHistory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L95-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L96-L99 type MasterIsStableIndicatorExceptionFetchingHistory struct { Message string `json:"message"` StackTrace string `json:"stack_trace"` @@ -87,3 +87,5 @@ func NewMasterIsStableIndicatorExceptionFetchingHistory() *MasterIsStableIndicat return r } + +// false diff --git a/typedapi/types/masterrecord.go b/typedapi/types/masterrecord.go index 52038df801..5ad6556b2e 100644 --- a/typedapi/types/masterrecord.go +++ b/typedapi/types/masterrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MasterRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/master/types.ts#L20-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/master/types.ts#L20-L39 type MasterRecord struct { // Host host name Host *string `json:"host,omitempty"` @@ -117,3 +117,5 @@ func NewMasterRecord() *MasterRecord { return r } + +// false diff --git a/typedapi/types/matchallquery.go b/typedapi/types/matchallquery.go index 2ae0173fec..2352fbf3fe 100644 --- a/typedapi/types/matchallquery.go +++ b/typedapi/types/matchallquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatchAllQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/MatchAllQuery.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/MatchAllQuery.ts#L22-L25 type MatchAllQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -96,3 +96,13 @@ func NewMatchAllQuery() *MatchAllQuery { return r } + +// true + +type MatchAllQueryVariant interface { + MatchAllQueryCaster() *MatchAllQuery +} + +func (s *MatchAllQuery) MatchAllQueryCaster() *MatchAllQuery { + return s +} diff --git a/typedapi/types/matchboolprefixquery.go b/typedapi/types/matchboolprefixquery.go index fcd945592a..8deaf29b0b 100644 --- a/typedapi/types/matchboolprefixquery.go +++ b/typedapi/types/matchboolprefixquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MatchBoolPrefixQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L355-L412 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L355-L412 type MatchBoolPrefixQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -232,3 +232,13 @@ func NewMatchBoolPrefixQuery() *MatchBoolPrefixQuery { return r } + +// true + +type MatchBoolPrefixQueryVariant interface { + MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery +} + +func (s *MatchBoolPrefixQuery) MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery { + return s +} diff --git a/typedapi/types/matchedfield.go b/typedapi/types/matchedfield.go index 93faf1f42b..52ef13124a 100644 --- a/typedapi/types/matchedfield.go +++ b/typedapi/types/matchedfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatchedField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/test_grok_pattern/types.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/test_grok_pattern/types.ts#L23-L27 type MatchedField struct { Length int `json:"length"` Match string `json:"match"` @@ -108,3 +108,5 @@ func NewMatchedField() *MatchedField { return r } + +// false diff --git a/typedapi/types/matchedtext.go b/typedapi/types/matchedtext.go index 2d2bc7f8c5..acf7c24230 100644 --- a/typedapi/types/matchedtext.go +++ b/typedapi/types/matchedtext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatchedText type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/test_grok_pattern/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/test_grok_pattern/types.ts#L29-L32 type MatchedText struct { Fields map[string][]MatchedField `json:"fields,omitempty"` Matched bool `json:"matched"` @@ -82,8 +82,10 @@ func (s *MatchedText) UnmarshalJSON(data []byte) error { // NewMatchedText returns a MatchedText. 
func NewMatchedText() *MatchedText { r := &MatchedText{ - Fields: make(map[string][]MatchedField, 0), + Fields: make(map[string][]MatchedField), } return r } + +// false diff --git a/typedapi/types/matchnonequery.go b/typedapi/types/matchnonequery.go index f2875639b0..ff46f5142f 100644 --- a/typedapi/types/matchnonequery.go +++ b/typedapi/types/matchnonequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatchNoneQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L25 type MatchNoneQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -96,3 +96,13 @@ func NewMatchNoneQuery() *MatchNoneQuery { return r } + +// true + +type MatchNoneQueryVariant interface { + MatchNoneQueryCaster() *MatchNoneQuery +} + +func (s *MatchNoneQuery) MatchNoneQueryCaster() *MatchNoneQuery { + return s +} diff --git a/typedapi/types/matchonlytextproperty.go b/typedapi/types/matchonlytextproperty.go index 33939737ea..92abb371d7 100644 --- a/typedapi/types/matchonlytextproperty.go +++ b/typedapi/types/matchonlytextproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MatchOnlyTextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L231-L256 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L247-L272 type MatchOnlyTextProperty struct { // CopyTo Allows you to copy the values of multiple fields into a group // field, which can then be queried as a single field. @@ -95,301 +95,313 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -431,9 +443,19 @@ func (s MatchOnlyTextProperty) MarshalJSON() ([]byte, error) { // NewMatchOnlyTextProperty returns a MatchOnlyTextProperty. 
func NewMatchOnlyTextProperty() *MatchOnlyTextProperty { r := &MatchOnlyTextProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), } return r } + +// true + +type MatchOnlyTextPropertyVariant interface { + MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty +} + +func (s *MatchOnlyTextProperty) MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty { + return s +} diff --git a/typedapi/types/matchphraseprefixquery.go b/typedapi/types/matchphraseprefixquery.go index 351999a177..9f7ddf4ac4 100644 --- a/typedapi/types/matchphraseprefixquery.go +++ b/typedapi/types/matchphraseprefixquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MatchPhrasePrefixQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L440-L469 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L440-L469 type MatchPhrasePrefixQuery struct { // Analyzer Analyzer used to convert text in the query value into tokens. 
Analyzer *string `json:"analyzer,omitempty"` @@ -183,3 +183,13 @@ func NewMatchPhrasePrefixQuery() *MatchPhrasePrefixQuery { return r } + +// true + +type MatchPhrasePrefixQueryVariant interface { + MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery +} + +func (s *MatchPhrasePrefixQuery) MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery { + return s +} diff --git a/typedapi/types/matchphrasequery.go b/typedapi/types/matchphrasequery.go index 539a34c639..c91a5c6dc8 100644 --- a/typedapi/types/matchphrasequery.go +++ b/typedapi/types/matchphrasequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MatchPhraseQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L414-L438 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L414-L438 type MatchPhraseQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -164,3 +164,13 @@ func NewMatchPhraseQuery() *MatchPhraseQuery { return r } + +// true + +type MatchPhraseQueryVariant interface { + MatchPhraseQueryCaster() *MatchPhraseQuery +} + +func (s *MatchPhraseQuery) MatchPhraseQueryCaster() *MatchPhraseQuery { + return s +} diff --git a/typedapi/types/matchquery.go b/typedapi/types/matchquery.go index 7d170f1903..5266609ba3 100644 --- a/typedapi/types/matchquery.go +++ b/typedapi/types/matchquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // MatchQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L285-L353 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L285-L353 type MatchQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -279,3 +279,13 @@ func NewMatchQuery() *MatchQuery { return r } + +// true + +type MatchQueryVariant interface { + MatchQueryCaster() *MatchQuery +} + +func (s *MatchQuery) MatchQueryCaster() *MatchQuery { + return s +} diff --git a/typedapi/types/matrixstatsaggregate.go b/typedapi/types/matrixstatsaggregate.go index a5546279db..9e92fec965 100644 --- a/typedapi/types/matrixstatsaggregate.go +++ b/typedapi/types/matrixstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatrixStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L866-L873 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L866-L873 type MatrixStatsAggregate struct { DocCount int64 `json:"doc_count"` Fields []MatrixStatsFields `json:"fields,omitempty"` @@ -89,3 +89,5 @@ func NewMatrixStatsAggregate() *MatrixStatsAggregate { return r } + +// false diff --git a/typedapi/types/matrixstatsaggregation.go b/typedapi/types/matrixstatsaggregation.go index 54149d6b12..dd21fb3351 100644 --- a/typedapi/types/matrixstatsaggregation.go +++ b/typedapi/types/matrixstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // MatrixStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/matrix.ts#L38-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/matrix.ts#L38-L44 type MatrixStatsAggregation struct { // Fields An array of fields for computing the statistics. Fields []string `json:"fields,omitempty"` @@ -95,8 +95,18 @@ func (s *MatrixStatsAggregation) UnmarshalJSON(data []byte) error { // NewMatrixStatsAggregation returns a MatrixStatsAggregation. 
func NewMatrixStatsAggregation() *MatrixStatsAggregation { r := &MatrixStatsAggregation{ - Missing: make(map[string]Float64, 0), + Missing: make(map[string]Float64), } return r } + +// true + +type MatrixStatsAggregationVariant interface { + MatrixStatsAggregationCaster() *MatrixStatsAggregation +} + +func (s *MatrixStatsAggregation) MatrixStatsAggregationCaster() *MatrixStatsAggregation { + return s +} diff --git a/typedapi/types/matrixstatsfields.go b/typedapi/types/matrixstatsfields.go index 5e2003e6c7..d6f9da6b6e 100644 --- a/typedapi/types/matrixstatsfields.go +++ b/typedapi/types/matrixstatsfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MatrixStatsFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L875-L884 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L875-L884 type MatrixStatsFields struct { Correlation map[string]Float64 `json:"correlation"` Count int64 `json:"count"` @@ -166,9 +166,11 @@ func (s *MatrixStatsFields) UnmarshalJSON(data []byte) error { // NewMatrixStatsFields returns a MatrixStatsFields. 
func NewMatrixStatsFields() *MatrixStatsFields { r := &MatrixStatsFields{ - Correlation: make(map[string]Float64, 0), - Covariance: make(map[string]Float64, 0), + Correlation: make(map[string]Float64), + Covariance: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/maxaggregate.go b/typedapi/types/maxaggregate.go index da800237a2..9853adc9f9 100644 --- a/typedapi/types/maxaggregate.go +++ b/typedapi/types/maxaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MaxAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L205-L209 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L205-L209 type MaxAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMaxAggregate() *MaxAggregate { return r } + +// false diff --git a/typedapi/types/maxaggregation.go b/typedapi/types/maxaggregation.go index 6bafb343b3..01238de61d 100644 --- a/typedapi/types/maxaggregation.go +++ b/typedapi/types/maxaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MaxAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L165-L165 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L165-L165 type MaxAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewMaxAggregation() *MaxAggregation { return r } + +// true + +type MaxAggregationVariant interface { + MaxAggregationCaster() *MaxAggregation +} + +func (s *MaxAggregation) MaxAggregationCaster() *MaxAggregation { + return s +} diff --git a/typedapi/types/maxbucketaggregation.go b/typedapi/types/maxbucketaggregation.go index d50805c838..bc81d16b31 100644 --- a/typedapi/types/maxbucketaggregation.go +++ b/typedapi/types/maxbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MaxBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L244-L247 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L244-L247 type MaxBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewMaxBucketAggregation() *MaxBucketAggregation { return r } + +// true + +type MaxBucketAggregationVariant interface { + MaxBucketAggregationCaster() *MaxBucketAggregation +} + +func (s *MaxBucketAggregation) MaxBucketAggregationCaster() *MaxBucketAggregation { + return s +} diff --git a/typedapi/types/maxmind.go b/typedapi/types/maxmind.go index 54ec858b1f..afa42bc508 100644 --- a/typedapi/types/maxmind.go +++ b/typedapi/types/maxmind.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Maxmind type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Database.ts#L31-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L55-L57 type Maxmind struct { AccountId string `json:"account_id"` } @@ -66,3 +66,13 @@ func NewMaxmind() *Maxmind { return r } + +// true + +type MaxmindVariant interface { + MaxmindCaster() *Maxmind +} + +func (s *Maxmind) MaxmindCaster() *Maxmind { + return s +} diff --git a/typedapi/types/medianabsolutedeviationaggregate.go b/typedapi/types/medianabsolutedeviationaggregate.go index ee6eed54e8..300651b2bd 100644 --- a/typedapi/types/medianabsolutedeviationaggregate.go +++ b/typedapi/types/medianabsolutedeviationaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MedianAbsoluteDeviationAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L196-L197 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L196-L197 type MedianAbsoluteDeviationAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMedianAbsoluteDeviationAggregate() *MedianAbsoluteDeviationAggregate { return r } + +// false diff --git a/typedapi/types/medianabsolutedeviationaggregation.go b/typedapi/types/medianabsolutedeviationaggregation.go index bf89400e13..7b3d25486c 100644 --- a/typedapi/types/medianabsolutedeviationaggregation.go +++ b/typedapi/types/medianabsolutedeviationaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MedianAbsoluteDeviationAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L167-L176 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L167-L176 type MedianAbsoluteDeviationAggregation struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -115,3 +115,13 @@ func NewMedianAbsoluteDeviationAggregation() *MedianAbsoluteDeviationAggregation return r } + +// true + +type MedianAbsoluteDeviationAggregationVariant interface { + MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation +} + +func (s *MedianAbsoluteDeviationAggregation) MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation { + return s +} diff --git a/typedapi/types/memmlstats.go b/typedapi/types/memmlstats.go index 23ebdaab14..c80eac29f9 100644 --- a/typedapi/types/memmlstats.go +++ b/typedapi/types/memmlstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MemMlStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_memory_stats/types.ts#L90-L111 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_memory_stats/types.ts#L90-L111 type MemMlStats struct { // AnomalyDetectors Amount of native memory set aside for anomaly detection jobs. 
AnomalyDetectors ByteSize `json:"anomaly_detectors,omitempty"` @@ -192,3 +192,5 @@ func NewMemMlStats() *MemMlStats { return r } + +// false diff --git a/typedapi/types/memory.go b/typedapi/types/memory.go index c5b7261440..d5b6548975 100644 --- a/typedapi/types/memory.go +++ b/typedapi/types/memory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Memory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_memory_stats/types.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_memory_stats/types.ts#L25-L48 type Memory struct { Attributes map[string]string `json:"attributes"` EphemeralId string `json:"ephemeral_id"` @@ -108,8 +108,10 @@ func (s *Memory) UnmarshalJSON(data []byte) error { // NewMemory returns a Memory. func NewMemory() *Memory { r := &Memory{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/memorystats.go b/typedapi/types/memorystats.go index 4c9101b405..ee696ed3a0 100644 --- a/typedapi/types/memorystats.go +++ b/typedapi/types/memorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MemoryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L632-L656 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L632-L656 type MemoryStats struct { // AdjustedTotalInBytes If the amount of physical memory has been overridden using the // `es`.`total_memory_bytes` system property then this reports the overridden @@ -219,3 +219,5 @@ func NewMemoryStats() *MemoryStats { return r } + +// false diff --git a/typedapi/types/memstats.go b/typedapi/types/memstats.go index fafeb7e81f..b59c904e7f 100644 --- a/typedapi/types/memstats.go +++ b/typedapi/types/memstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MemStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/get_memory_stats/types.ts#L65-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/get_memory_stats/types.ts#L65-L88 type MemStats struct { // AdjustedTotal If the amount of physical memory has been overridden using the // es.total_memory_bytes system property @@ -124,3 +124,5 @@ func NewMemStats() *MemStats { return r } + +// false diff --git a/typedapi/types/merge.go b/typedapi/types/merge.go index 84c5360b98..20c36e6bb6 100644 --- a/typedapi/types/merge.go +++ b/typedapi/types/merge.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Merge type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L332-L334 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L346-L348 type Merge struct { Scheduler *MergeScheduler `json:"scheduler,omitempty"` } @@ -33,3 +33,13 @@ func NewMerge() *Merge { return r } + +// true + +type MergeVariant interface { + MergeCaster() *Merge +} + +func (s *Merge) MergeCaster() *Merge { + return s +} diff --git a/typedapi/types/mergescheduler.go b/typedapi/types/mergescheduler.go index 6bf9b6b77c..a171915ed2 100644 --- a/typedapi/types/mergescheduler.go +++ b/typedapi/types/mergescheduler.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MergeScheduler type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L336-L339 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L350-L353 type MergeScheduler struct { MaxMergeCount Stringifiedinteger `json:"max_merge_count,omitempty"` MaxThreadCount Stringifiedinteger `json:"max_thread_count,omitempty"` @@ -72,3 +72,13 @@ func NewMergeScheduler() *MergeScheduler { return r } + +// true + +type MergeSchedulerVariant interface { + MergeSchedulerCaster() *MergeScheduler +} + +func (s *MergeScheduler) MergeSchedulerCaster() *MergeScheduler { + return s +} diff --git a/typedapi/types/mergesstats.go b/typedapi/types/mergesstats.go index 48f1c3d490..72b7c3cc89 100644 --- a/typedapi/types/mergesstats.go +++ b/typedapi/types/mergesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MergesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L161-L178 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L164-L181 type MergesStats struct { Current int64 `json:"current"` CurrentDocs int64 `json:"current_docs"` @@ -248,3 +248,5 @@ func NewMergesStats() *MergesStats { return r } + +// false diff --git a/typedapi/types/message.go b/typedapi/types/message.go new file mode 100644 index 0000000000..23dcd05c80 --- /dev/null +++ b/typedapi/types/message.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Message type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L145-L165 +type Message struct { + // Content The content of the message. + Content MessageContent `json:"content,omitempty"` + // Role The role of the message author. + Role string `json:"role"` + // ToolCallId The tool call that this message is responding to. + ToolCallId *string `json:"tool_call_id,omitempty"` + // ToolCalls The tool calls generated by the model. 
+ ToolCalls []ToolCall `json:"tool_calls,omitempty"` +} + +func (s *Message) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + if err := dec.Decode(&s.Content); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + + case "role": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Role", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Role = o + + case "tool_call_id": + if err := dec.Decode(&s.ToolCallId); err != nil { + return fmt.Errorf("%s | %w", "ToolCallId", err) + } + + case "tool_calls": + if err := dec.Decode(&s.ToolCalls); err != nil { + return fmt.Errorf("%s | %w", "ToolCalls", err) + } + + } + } + return nil +} + +// NewMessage returns a Message. +func NewMessage() *Message { + r := &Message{} + + return r +} + +// true + +type MessageVariant interface { + MessageCaster() *Message +} + +func (s *Message) MessageCaster() *Message { + return s +} diff --git a/typedapi/types/rankfeaturefunction.go b/typedapi/types/messagecontent.go similarity index 64% rename from typedapi/types/rankfeaturefunction.go rename to typedapi/types/messagecontent.go index 17681532d1..8dd0585894 100644 --- a/typedapi/types/rankfeaturefunction.go +++ b/typedapi/types/messagecontent.go @@ -16,19 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types -// RankFeatureFunction type. 
+// MessageContent holds the union for the following types: // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L280-L280 -type RankFeatureFunction struct { -} - -// NewRankFeatureFunction returns a RankFeatureFunction. -func NewRankFeatureFunction() *RankFeatureFunction { - r := &RankFeatureFunction{} +// string +// []ContentObject +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L140-L143 +type MessageContent any - return r +type MessageContentVariant interface { + MessageContentCaster() *MessageContent } diff --git a/typedapi/types/metadata.go b/typedapi/types/metadata.go index 4a70629008..d94f311b0f 100644 --- a/typedapi/types/metadata.go +++ b/typedapi/types/metadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ import ( // Metadata type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L100-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L93-L93 type Metadata map[string]json.RawMessage + +type MetadataVariant interface { + MetadataCaster() *Metadata +} diff --git a/typedapi/types/metrics.go b/typedapi/types/metrics.go index b188cca7af..1ac8404e81 100644 --- a/typedapi/types/metrics.go +++ b/typedapi/types/metrics.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Metrics type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L76-L76 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L70-L70 type Metrics []string diff --git a/typedapi/types/mgetoperation.go b/typedapi/types/mgetoperation.go index 57764e53d6..780486acbf 100644 --- a/typedapi/types/mgetoperation.go +++ b/typedapi/types/mgetoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // MgetOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mget/types.ts#L32-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mget/types.ts#L32-L55 type MgetOperation struct { // Id_ The unique document ID. 
Id_ string `json:"_id"` @@ -152,3 +152,13 @@ func NewMgetOperation() *MgetOperation { return r } + +// true + +type MgetOperationVariant interface { + MgetOperationCaster() *MgetOperation +} + +func (s *MgetOperation) MgetOperationCaster() *MgetOperation { + return s +} diff --git a/typedapi/types/mgetresponseitem.go b/typedapi/types/mgetresponseitem.go index cf58505543..c39f06e609 100644 --- a/typedapi/types/mgetresponseitem.go +++ b/typedapi/types/mgetresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // GetResult // MultiGetError // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mget/types.ts#L57-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mget/types.ts#L57-L60 type MgetResponseItem any diff --git a/typedapi/types/migrateaction.go b/typedapi/types/migrateaction.go index 142e11d47b..1bfcf2b763 100644 --- a/typedapi/types/migrateaction.go +++ b/typedapi/types/migrateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MigrateAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L141-L143 type MigrateAction struct { Enabled *bool `json:"enabled,omitempty"` } @@ -76,3 +76,13 @@ func NewMigrateAction() *MigrateAction { return r } + +// true + +type MigrateActionVariant interface { + MigrateActionCaster() *MigrateAction +} + +func (s *MigrateAction) MigrateActionCaster() *MigrateAction { + return s +} diff --git a/typedapi/types/migratereindex.go b/typedapi/types/migratereindex.go new file mode 100644 index 0000000000..34b7d7db67 --- /dev/null +++ b/typedapi/types/migratereindex.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/modeenum" +) + +// MigrateReindex type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L39-L48 +type MigrateReindex struct { + // Mode Reindex mode. Currently only 'upgrade' is supported. + Mode modeenum.ModeEnum `json:"mode"` + // Source The source index or data stream (only data streams are currently supported). + Source SourceIndex `json:"source"` +} + +// NewMigrateReindex returns a MigrateReindex. +func NewMigrateReindex() *MigrateReindex { + r := &MigrateReindex{} + + return r +} + +// true + +type MigrateReindexVariant interface { + MigrateReindexCaster() *MigrateReindex +} + +func (s *MigrateReindex) MigrateReindexCaster() *MigrateReindex { + return s +} diff --git a/typedapi/types/migrationfeatureindexinfo.go b/typedapi/types/migrationfeatureindexinfo.go index 1bad41e220..956d374a39 100644 --- a/typedapi/types/migrationfeatureindexinfo.go +++ b/typedapi/types/migrationfeatureindexinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MigrationFeatureIndexInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 type MigrationFeatureIndexInfo struct { FailureCause *ErrorCause `json:"failure_cause,omitempty"` Index string `json:"index"` @@ -78,3 +78,5 @@ func NewMigrationFeatureIndexInfo() *MigrationFeatureIndexInfo { return r } + +// false diff --git a/typedapi/types/minaggregate.go b/typedapi/types/minaggregate.go index 44c72c8b22..57bbdf97ff 100644 --- a/typedapi/types/minaggregate.go +++ b/typedapi/types/minaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MinAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L199-L203 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L199-L203 type MinAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMinAggregate() *MinAggregate { return r } + +// false diff --git a/typedapi/types/minaggregation.go b/typedapi/types/minaggregation.go index a43ef7f7cb..544be042ee 100644 --- a/typedapi/types/minaggregation.go +++ b/typedapi/types/minaggregation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MinAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L178-L178 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L178-L178 type MinAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewMinAggregation() *MinAggregation { return r } + +// true + +type MinAggregationVariant interface { + MinAggregationCaster() *MinAggregation +} + +func (s *MinAggregation) MinAggregationCaster() *MinAggregation { + return s +} diff --git a/typedapi/types/minbucketaggregation.go b/typedapi/types/minbucketaggregation.go index d0ad197a22..9a685d0c7a 100644 --- a/typedapi/types/minbucketaggregation.go +++ b/typedapi/types/minbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MinBucketAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L249-L252 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L249-L252 type MinBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewMinBucketAggregation() *MinBucketAggregation { return r } + +// true + +type MinBucketAggregationVariant interface { + MinBucketAggregationCaster() *MinBucketAggregation +} + +func (s *MinBucketAggregation) MinBucketAggregationCaster() *MinBucketAggregation { + return s +} diff --git a/typedapi/types/minimallicenseinformation.go b/typedapi/types/minimallicenseinformation.go index 5e50e3cbf7..23ce4d48e3 100644 --- a/typedapi/types/minimallicenseinformation.go +++ b/typedapi/types/minimallicenseinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // MinimalLicenseInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/types.ts#L34-L40 type MinimalLicenseInformation struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` Mode licensetype.LicenseType `json:"mode"` @@ -101,3 +101,5 @@ func NewMinimalLicenseInformation() *MinimalLicenseInformation { return r } + +// false diff --git a/typedapi/types/minimumshouldmatch.go b/typedapi/types/minimumshouldmatch.go index 7c574ab0ed..8b6c5367d7 100644 --- a/typedapi/types/minimumshouldmatch.go +++ b/typedapi/types/minimumshouldmatch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L164-L168 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L160-L164 type MinimumShouldMatch any + +type MinimumShouldMatchVariant interface { + MinimumShouldMatchCaster() *MinimumShouldMatch +} diff --git a/typedapi/types/missing.go b/typedapi/types/missing.go index 643daa42e4..58208f7fa3 100644 --- a/typedapi/types/missing.go +++ b/typedapi/types/missing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,5 +27,9 @@ package types // Float64 // bool // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/AggregationContainer.ts#L535-L535 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/AggregationContainer.ts#L535-L535 type Missing any + +type MissingVariant interface { + MissingCaster() *Missing +} diff --git a/typedapi/types/missingaggregate.go b/typedapi/types/missingaggregate.go index fadf029b31..d9bcb71ec4 100644 --- a/typedapi/types/missingaggregate.go +++ b/typedapi/types/missingaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // MissingAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L528-L532 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L528-L532 type MissingAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s MissingAggregate) MarshalJSON() ([]byte, error) { // NewMissingAggregate returns a MissingAggregate. 
func NewMissingAggregate() *MissingAggregate { r := &MissingAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/missingaggregation.go b/typedapi/types/missingaggregation.go index ea1aac7b02..1f85f6a951 100644 --- a/typedapi/types/missingaggregation.go +++ b/typedapi/types/missingaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MissingAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L593-L599 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L593-L599 type MissingAggregation struct { // Field The name of the field. Field *string `json:"field,omitempty"` @@ -73,3 +73,13 @@ func NewMissingAggregation() *MissingAggregation { return r } + +// true + +type MissingAggregationVariant interface { + MissingAggregationCaster() *MissingAggregation +} + +func (s *MissingAggregation) MissingAggregationCaster() *MissingAggregation { + return s +} diff --git a/typedapi/types/mlcounter.go b/typedapi/types/mlcounter.go index fdb876a948..ff89ca0da8 100644 --- a/typedapi/types/mlcounter.go +++ b/typedapi/types/mlcounter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlCounter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L253-L255 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L265-L267 type MlCounter struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewMlCounter() *MlCounter { return r } + +// false diff --git a/typedapi/types/mldatafeed.go b/typedapi/types/mldatafeed.go index 4848522b2a..4b187ea8a8 100644 --- a/typedapi/types/mldatafeed.go +++ b/typedapi/types/mldatafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MLDatafeed type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L36-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L37-L61 type MLDatafeed struct { Aggregations map[string]Aggregations `json:"aggregations,omitempty"` // Authorization The security privileges that the datafeed uses to run its queries. If Elastic @@ -185,9 +185,11 @@ func (s *MLDatafeed) UnmarshalJSON(data []byte) error { // NewMLDatafeed returns a MLDatafeed. 
func NewMLDatafeed() *MLDatafeed { r := &MLDatafeed{ - Aggregations: make(map[string]Aggregations, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + ScriptFields: make(map[string]ScriptField), } return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobs.go b/typedapi/types/mldataframeanalyticsjobs.go index d178f56e55..1690e1fc12 100644 --- a/typedapi/types/mldataframeanalyticsjobs.go +++ b/typedapi/types/mldataframeanalyticsjobs.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MlDataFrameAnalyticsJobs type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L175-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L187-L192 type MlDataFrameAnalyticsJobs struct { All_ MlDataFrameAnalyticsJobsCount `json:"_all"` AnalysisCounts *MlDataFrameAnalyticsJobsAnalysis `json:"analysis_counts,omitempty"` @@ -36,3 +36,5 @@ func NewMlDataFrameAnalyticsJobs() *MlDataFrameAnalyticsJobs { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobsanalysis.go b/typedapi/types/mldataframeanalyticsjobsanalysis.go index 2c4cd1f14c..c3870885ff 100644 --- a/typedapi/types/mldataframeanalyticsjobsanalysis.go +++ b/typedapi/types/mldataframeanalyticsjobsanalysis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlDataFrameAnalyticsJobsAnalysis type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L182-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L194-L198 type MlDataFrameAnalyticsJobsAnalysis struct { Classification *int `json:"classification,omitempty"` OutlierDetection *int `json:"outlier_detection,omitempty"` @@ -112,3 +112,5 @@ func NewMlDataFrameAnalyticsJobsAnalysis() *MlDataFrameAnalyticsJobsAnalysis { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobscount.go b/typedapi/types/mldataframeanalyticsjobscount.go index aa68b05ca3..29846bb371 100644 --- a/typedapi/types/mldataframeanalyticsjobscount.go +++ b/typedapi/types/mldataframeanalyticsjobscount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlDataFrameAnalyticsJobsCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L192-L194 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L204-L206 type MlDataFrameAnalyticsJobsCount struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewMlDataFrameAnalyticsJobsCount() *MlDataFrameAnalyticsJobsCount { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobsmemory.go b/typedapi/types/mldataframeanalyticsjobsmemory.go index e44a5abd46..815ff02174 100644 --- a/typedapi/types/mldataframeanalyticsjobsmemory.go +++ b/typedapi/types/mldataframeanalyticsjobsmemory.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MlDataFrameAnalyticsJobsMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L188-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L200-L202 type MlDataFrameAnalyticsJobsMemory struct { PeakUsageBytes JobStatistics `json:"peak_usage_bytes"` } @@ -33,3 +33,5 @@ func NewMlDataFrameAnalyticsJobsMemory() *MlDataFrameAnalyticsJobsMemory { return r } + +// false diff --git a/typedapi/types/mlfilter.go b/typedapi/types/mlfilter.go index 8d6ba632ec..a82bd17ece 100644 --- a/typedapi/types/mlfilter.go +++ b/typedapi/types/mlfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MLFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Filter.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Filter.ts#L22-L29 type MLFilter struct { // Description A description of the filter. Description *string `json:"description,omitempty"` @@ -89,3 +89,5 @@ func NewMLFilter() *MLFilter { return r } + +// false diff --git a/typedapi/types/mlinference.go b/typedapi/types/mlinference.go index 125556da64..4a8bee658e 100644 --- a/typedapi/types/mlinference.go +++ b/typedapi/types/mlinference.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MlInference type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L196-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L208-L216 type MlInference struct { Deployments *MlInferenceDeployments `json:"deployments,omitempty"` IngestProcessors map[string]MlInferenceIngestProcessor `json:"ingest_processors"` @@ -32,8 +32,10 @@ type MlInference struct { // NewMlInference returns a MlInference. 
func NewMlInference() *MlInference { r := &MlInference{ - IngestProcessors: make(map[string]MlInferenceIngestProcessor, 0), + IngestProcessors: make(map[string]MlInferenceIngestProcessor), } return r } + +// false diff --git a/typedapi/types/mlinferencedeployments.go b/typedapi/types/mlinferencedeployments.go index b4dc941475..8592425f73 100644 --- a/typedapi/types/mlinferencedeployments.go +++ b/typedapi/types/mlinferencedeployments.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlInferenceDeployments type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L225-L230 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L237-L242 type MlInferenceDeployments struct { Count int `json:"count"` InferenceCounts JobStatistics `json:"inference_counts"` @@ -96,3 +96,5 @@ func NewMlInferenceDeployments() *MlInferenceDeployments { return r } + +// false diff --git a/typedapi/types/mlinferencedeploymentstimems.go b/typedapi/types/mlinferencedeploymentstimems.go index c1a1633567..10c5d0ac99 100644 --- a/typedapi/types/mlinferencedeploymentstimems.go +++ b/typedapi/types/mlinferencedeploymentstimems.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlInferenceDeploymentsTimeMs type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L232-L234 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L244-L246 type MlInferenceDeploymentsTimeMs struct { Avg Float64 `json:"avg"` } @@ -78,3 +78,5 @@ func NewMlInferenceDeploymentsTimeMs() *MlInferenceDeploymentsTimeMs { return r } + +// false diff --git a/typedapi/types/mlinferenceingestprocessor.go b/typedapi/types/mlinferenceingestprocessor.go index 2fbc4e100c..3de6c30eea 100644 --- a/typedapi/types/mlinferenceingestprocessor.go +++ b/typedapi/types/mlinferenceingestprocessor.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MlInferenceIngestProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L206-L211 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L218-L223 type MlInferenceIngestProcessor struct { NumDocsProcessed MlInferenceIngestProcessorCount `json:"num_docs_processed"` NumFailures MlInferenceIngestProcessorCount `json:"num_failures"` @@ -36,3 +36,5 @@ func NewMlInferenceIngestProcessor() *MlInferenceIngestProcessor { return r } + +// false diff --git a/typedapi/types/mlinferenceingestprocessorcount.go b/typedapi/types/mlinferenceingestprocessorcount.go index 36dd1e06ec..2d0758d5dc 100644 --- a/typedapi/types/mlinferenceingestprocessorcount.go +++ b/typedapi/types/mlinferenceingestprocessorcount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlInferenceIngestProcessorCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L236-L240 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L248-L252 type MlInferenceIngestProcessorCount struct { Max int64 `json:"max"` Min int64 `json:"min"` @@ -109,3 +109,5 @@ func NewMlInferenceIngestProcessorCount() *MlInferenceIngestProcessorCount { return r } + +// false diff --git a/typedapi/types/mlinferencetrainedmodels.go b/typedapi/types/mlinferencetrainedmodels.go index 6460dac3b6..8af01b9454 100644 --- a/typedapi/types/mlinferencetrainedmodels.go +++ b/typedapi/types/mlinferencetrainedmodels.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // MlInferenceTrainedModels type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L213-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L225-L235 type MlInferenceTrainedModels struct { All_ MlCounter `json:"_all"` Count *MlInferenceTrainedModelsCount `json:"count,omitempty"` @@ -37,3 +37,5 @@ func NewMlInferenceTrainedModels() *MlInferenceTrainedModels { return r } + +// false diff --git a/typedapi/types/mlinferencetrainedmodelscount.go b/typedapi/types/mlinferencetrainedmodelscount.go index 248d5c528f..9e601f800c 100644 --- a/typedapi/types/mlinferencetrainedmodelscount.go +++ b/typedapi/types/mlinferencetrainedmodelscount.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlInferenceTrainedModelsCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L242-L251 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L254-L263 type MlInferenceTrainedModelsCount struct { Classification *int64 `json:"classification,omitempty"` Ner *int64 `json:"ner,omitempty"` @@ -189,3 +189,5 @@ func NewMlInferenceTrainedModelsCount() *MlInferenceTrainedModelsCount { return r } + +// false diff --git a/typedapi/types/mljobforecasts.go b/typedapi/types/mljobforecasts.go index 3829d368cd..1fbc812a1f 100644 --- a/typedapi/types/mljobforecasts.go +++ b/typedapi/types/mljobforecasts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MlJobForecasts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L170-L173 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L182-L185 type MlJobForecasts struct { ForecastedJobs int64 `json:"forecasted_jobs"` Total int64 `json:"total"` @@ -93,3 +93,5 @@ func NewMlJobForecasts() *MlJobForecasts { return r } + +// false diff --git a/typedapi/types/modelpackageconfig.go b/typedapi/types/modelpackageconfig.go new file mode 100644 index 0000000000..bfa9c41ebb --- /dev/null +++ b/typedapi/types/modelpackageconfig.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ModelPackageConfig type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L256-L271 +type ModelPackageConfig struct { + CreateTime *int64 `json:"create_time,omitempty"` + Description *string `json:"description,omitempty"` + InferenceConfig map[string]json.RawMessage `json:"inference_config,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + MinimumVersion *string `json:"minimum_version,omitempty"` + ModelRepository *string `json:"model_repository,omitempty"` + ModelType *string `json:"model_type,omitempty"` + PackagedModelId string `json:"packaged_model_id"` + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + Sha256 *string `json:"sha256,omitempty"` + Size ByteSize `json:"size,omitempty"` + Tags []string `json:"tags,omitempty"` + VocabularyFile *string `json:"vocabulary_file,omitempty"` +} + +func (s *ModelPackageConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "inference_config": + if s.InferenceConfig == nil { + s.InferenceConfig = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case 
"minimum_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinimumVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinimumVersion = &o + + case "model_repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelRepository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelRepository = &o + + case "model_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelType = &o + + case "packaged_model_id": + if err := dec.Decode(&s.PackagedModelId); err != nil { + return fmt.Errorf("%s | %w", "PackagedModelId", err) + } + + case "platform_architecture": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PlatformArchitecture", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PlatformArchitecture = &o + + case "prefix_strings": + if err := dec.Decode(&s.PrefixStrings); err != nil { + return fmt.Errorf("%s | %w", "PrefixStrings", err) + } + + case "sha256": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Sha256", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Sha256 = &o + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return fmt.Errorf("%s | %w", "Tags", err) + } + + case "vocabulary_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VocabularyFile", err) 
+ } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VocabularyFile = &o + + } + } + return nil +} + +// NewModelPackageConfig returns a ModelPackageConfig. +func NewModelPackageConfig() *ModelPackageConfig { + r := &ModelPackageConfig{ + InferenceConfig: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/modelplotconfig.go b/typedapi/types/modelplotconfig.go index dfa5079724..2faf8d035c 100644 --- a/typedapi/types/modelplotconfig.go +++ b/typedapi/types/modelplotconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ModelPlotConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/ModelPlot.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/ModelPlot.ts#L23-L42 type ModelPlotConfig struct { // AnnotationsEnabled If true, enables calculation and storage of the model change annotations for // each entity that is being analyzed. @@ -105,3 +105,13 @@ func NewModelPlotConfig() *ModelPlotConfig { return r } + +// true + +type ModelPlotConfigVariant interface { + ModelPlotConfigCaster() *ModelPlotConfig +} + +func (s *ModelPlotConfig) ModelPlotConfigCaster() *ModelPlotConfig { + return s +} diff --git a/typedapi/types/modelsizestats.go b/typedapi/types/modelsizestats.go index 6c0a8c0576..98d856fa83 100644 --- a/typedapi/types/modelsizestats.go +++ b/typedapi/types/modelsizestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // ModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L59-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L59-L82 type ModelSizeStats struct { AssignmentMemoryBasis *string `json:"assignment_memory_basis,omitempty"` BucketAllocationFailuresCount int64 `json:"bucket_allocation_failures_count"` @@ -49,6 +49,7 @@ type ModelSizeStats struct { ModelBytes ByteSize `json:"model_bytes"` ModelBytesExceeded ByteSize `json:"model_bytes_exceeded,omitempty"` ModelBytesMemoryLimit ByteSize `json:"model_bytes_memory_limit,omitempty"` + OutputMemoryAllocatorBytes ByteSize `json:"output_memory_allocator_bytes,omitempty"` PeakModelBytes ByteSize `json:"peak_model_bytes,omitempty"` RareCategoryCount int `json:"rare_category_count"` ResultType string `json:"result_type"` @@ -200,6 +201,11 @@ func (s *ModelSizeStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelBytesMemoryLimit", err) } + case "output_memory_allocator_bytes": + if err := dec.Decode(&s.OutputMemoryAllocatorBytes); err != nil { + return fmt.Errorf("%s | %w", "OutputMemoryAllocatorBytes", err) + } + case "peak_model_bytes": if err := dec.Decode(&s.PeakModelBytes); err != nil { return fmt.Errorf("%s | %w", "PeakModelBytes", err) @@ -320,3 +326,5 @@ func NewModelSizeStats() *ModelSizeStats { return r } + +// false diff --git a/typedapi/types/modelsnapshot.go b/typedapi/types/modelsnapshot.go index 43ec3e3d51..5bd07b4a78 100644 --- a/typedapi/types/modelsnapshot.go +++ b/typedapi/types/modelsnapshot.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ModelSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L25-L46 type ModelSnapshot struct { // Description An optional description of the job. Description *string `json:"description,omitempty"` @@ -192,3 +192,5 @@ func NewModelSnapshot() *ModelSnapshot { return r } + +// false diff --git a/typedapi/types/modelsnapshotupgrade.go b/typedapi/types/modelsnapshotupgrade.go index e5476b4929..9e741f68e4 100644 --- a/typedapi/types/modelsnapshotupgrade.go +++ b/typedapi/types/modelsnapshotupgrade.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ModelSnapshotUpgrade type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Model.ts#L48-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Model.ts#L48-L57 type ModelSnapshotUpgrade struct { AssignmentExplanation string `json:"assignment_explanation"` JobId string `json:"job_id"` @@ -100,3 +100,5 @@ func NewModelSnapshotUpgrade() *ModelSnapshotUpgrade { return r } + +// false diff --git a/typedapi/types/monitoring.go b/typedapi/types/monitoring.go index fc26c74309..4f2d8e85f4 100644 --- a/typedapi/types/monitoring.go +++ b/typedapi/types/monitoring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Monitoring type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L379-L382 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L387-L390 type Monitoring struct { Available bool `json:"available"` CollectionEnabled bool `json:"collection_enabled"` @@ -112,8 +112,10 @@ func (s *Monitoring) UnmarshalJSON(data []byte) error { // NewMonitoring returns a Monitoring. 
func NewMonitoring() *Monitoring { r := &Monitoring{ - EnabledExporters: make(map[string]int64, 0), + EnabledExporters: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/morelikethisquery.go b/typedapi/types/morelikethisquery.go index 2d4c2e932a..835c7b16d7 100644 --- a/typedapi/types/morelikethisquery.go +++ b/typedapi/types/morelikethisquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MoreLikeThisQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L87-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L87-L172 type MoreLikeThisQuery struct { // Analyzer The analyzer that is used to analyze the free form text. // Defaults to the analyzer associated with the first field in fields. @@ -371,3 +371,13 @@ func NewMoreLikeThisQuery() *MoreLikeThisQuery { return r } + +// true + +type MoreLikeThisQueryVariant interface { + MoreLikeThisQueryCaster() *MoreLikeThisQuery +} + +func (s *MoreLikeThisQuery) MoreLikeThisQueryCaster() *MoreLikeThisQuery { + return s +} diff --git a/typedapi/types/mountedsnapshot.go b/typedapi/types/mountedsnapshot.go index 8a0d32de69..b1dbc91b6d 100644 --- a/typedapi/types/mountedsnapshot.go +++ b/typedapi/types/mountedsnapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MountedSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/mount/types.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/mount/types.ts#L23-L27 type MountedSnapshot struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` @@ -89,3 +89,5 @@ func NewMountedSnapshot() *MountedSnapshot { return r } + +// false diff --git a/typedapi/types/movingaverageaggregation.go b/typedapi/types/movingaverageaggregation.go index e599334778..cf9bb47548 100644 --- a/typedapi/types/movingaverageaggregation.go +++ b/typedapi/types/movingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,5 +28,9 @@ package types // HoltMovingAverageAggregation // HoltWintersMovingAverageAggregation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L254-L260 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L254-L260 type MovingAverageAggregation any + +type MovingAverageAggregationVariant interface { + MovingAverageAggregationCaster() *MovingAverageAggregation +} diff --git a/typedapi/types/movingfunctionaggregation.go b/typedapi/types/movingfunctionaggregation.go index 5c618d7342..b7061dee4b 100644 --- a/typedapi/types/movingfunctionaggregation.go +++ b/typedapi/types/movingfunctionaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MovingFunctionAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L314-L332 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L314-L332 type MovingFunctionAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -146,3 +146,13 @@ func NewMovingFunctionAggregation() *MovingFunctionAggregation { return r } + +// true + +type MovingFunctionAggregationVariant interface { + MovingFunctionAggregationCaster() *MovingFunctionAggregation +} + +func (s *MovingFunctionAggregation) MovingFunctionAggregationCaster() *MovingFunctionAggregation { + return s +} diff --git a/typedapi/types/movingpercentilesaggregation.go b/typedapi/types/movingpercentilesaggregation.go index fff3692a83..6b7149cef9 100644 --- a/typedapi/types/movingpercentilesaggregation.go +++ b/typedapi/types/movingpercentilesaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MovingPercentilesAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L334-L349 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L334-L349 type MovingPercentilesAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -147,3 +147,13 @@ func NewMovingPercentilesAggregation() *MovingPercentilesAggregation { return r } + +// true + +type MovingPercentilesAggregationVariant interface { + MovingPercentilesAggregationCaster() *MovingPercentilesAggregation +} + +func (s *MovingPercentilesAggregation) MovingPercentilesAggregationCaster() *MovingPercentilesAggregation { + return s +} diff --git a/typedapi/types/msearchrequestitem.go b/typedapi/types/msearchrequestitem.go index b884db279a..1ab7b9aa02 100644 --- a/typedapi/types/msearchrequestitem.go +++ b/typedapi/types/msearchrequestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // MultisearchHeader // MultisearchBody // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/types.ts#L47-L50 type MsearchRequestItem any + +type MsearchRequestItemVariant interface { + MsearchRequestItemCaster() *MsearchRequestItem +} diff --git a/typedapi/types/msearchresponseitem.go b/typedapi/types/msearchresponseitem.go index 57fae279ce..13b915d11c 100644 --- a/typedapi/types/msearchresponseitem.go +++ b/typedapi/types/msearchresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // MultiSearchItem // ErrorResponseBase // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L211-L214 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/types.ts#L211-L214 type MsearchResponseItem any diff --git a/typedapi/types/mtermvectorsoperation.go b/typedapi/types/mtermvectorsoperation.go index ac78109985..e46c0ff2d1 100644 --- a/typedapi/types/mtermvectorsoperation.go +++ b/typedapi/types/mtermvectorsoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // MTermVectorsOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mtermvectors/types.ts#L35-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mtermvectors/types.ts#L35-L94 type MTermVectorsOperation struct { // Doc An artificial document (a document not present in the index) for which you // want to retrieve term vectors. 
@@ -215,3 +215,13 @@ func NewMTermVectorsOperation() *MTermVectorsOperation { return r } + +// true + +type MTermVectorsOperationVariant interface { + MTermVectorsOperationCaster() *MTermVectorsOperation +} + +func (s *MTermVectorsOperation) MTermVectorsOperationCaster() *MTermVectorsOperation { + return s +} diff --git a/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go b/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go deleted file mode 100644 index f1344b05eb..0000000000 --- a/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseAdjacencyMatrixBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseAdjacencyMatrixBucket struct { - Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseAdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]AdjacencyMatrixBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []AdjacencyMatrixBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseAdjacencyMatrixBucket returns a MultiBucketAggregateBaseAdjacencyMatrixBucket. -func NewMultiBucketAggregateBaseAdjacencyMatrixBucket() *MultiBucketAggregateBaseAdjacencyMatrixBucket { - r := &MultiBucketAggregateBaseAdjacencyMatrixBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasecompositebucket.go b/typedapi/types/multibucketaggregatebasecompositebucket.go deleted file mode 100644 index cf85be2b7d..0000000000 --- a/typedapi/types/multibucketaggregatebasecompositebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseCompositeBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseCompositeBucket struct { - Buckets BucketsCompositeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseCompositeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]CompositeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []CompositeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseCompositeBucket returns a MultiBucketAggregateBaseCompositeBucket. -func NewMultiBucketAggregateBaseCompositeBucket() *MultiBucketAggregateBaseCompositeBucket { - r := &MultiBucketAggregateBaseCompositeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasedatehistogrambucket.go b/typedapi/types/multibucketaggregatebasedatehistogrambucket.go deleted file mode 100644 index 4f434256b8..0000000000 --- a/typedapi/types/multibucketaggregatebasedatehistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseDateHistogramBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseDateHistogramBucket struct { - Buckets BucketsDateHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseDateHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DateHistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []DateHistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - 
case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseDateHistogramBucket returns a MultiBucketAggregateBaseDateHistogramBucket. -func NewMultiBucketAggregateBaseDateHistogramBucket() *MultiBucketAggregateBaseDateHistogramBucket { - r := &MultiBucketAggregateBaseDateHistogramBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasedoubletermsbucket.go b/typedapi/types/multibucketaggregatebasedoubletermsbucket.go deleted file mode 100644 index be40707cb7..0000000000 --- a/typedapi/types/multibucketaggregatebasedoubletermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseDoubleTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DoubleTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []DoubleTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseDoubleTermsBucket returns a MultiBucketAggregateBaseDoubleTermsBucket. -func NewMultiBucketAggregateBaseDoubleTermsBucket() *MultiBucketAggregateBaseDoubleTermsBucket { - r := &MultiBucketAggregateBaseDoubleTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasefiltersbucket.go b/typedapi/types/multibucketaggregatebasefiltersbucket.go deleted file mode 100644 index 2d736856e2..0000000000 --- a/typedapi/types/multibucketaggregatebasefiltersbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseFiltersBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseFiltersBucket struct { - Buckets BucketsFiltersBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseFiltersBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]FiltersBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []FiltersBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseFiltersBucket returns a MultiBucketAggregateBaseFiltersBucket. -func NewMultiBucketAggregateBaseFiltersBucket() *MultiBucketAggregateBaseFiltersBucket { - r := &MultiBucketAggregateBaseFiltersBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go b/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go deleted file mode 100644 index ad8184f1eb..0000000000 --- a/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseFrequentItemSetsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseFrequentItemSetsBucket struct { - Buckets BucketsFrequentItemSetsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseFrequentItemSetsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]FrequentItemSetsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []FrequentItemSetsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseFrequentItemSetsBucket returns a MultiBucketAggregateBaseFrequentItemSetsBucket. -func NewMultiBucketAggregateBaseFrequentItemSetsBucket() *MultiBucketAggregateBaseFrequentItemSetsBucket { - r := &MultiBucketAggregateBaseFrequentItemSetsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeohashgridbucket.go b/typedapi/types/multibucketaggregatebasegeohashgridbucket.go deleted file mode 100644 index c64d04bb5e..0000000000 --- a/typedapi/types/multibucketaggregatebasegeohashgridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoHashGridBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseGeoHashGridBucket struct { - Buckets BucketsGeoHashGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoHashGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoHashGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoHashGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoHashGridBucket returns a MultiBucketAggregateBaseGeoHashGridBucket. -func NewMultiBucketAggregateBaseGeoHashGridBucket() *MultiBucketAggregateBaseGeoHashGridBucket { - r := &MultiBucketAggregateBaseGeoHashGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeohexgridbucket.go b/typedapi/types/multibucketaggregatebasegeohexgridbucket.go deleted file mode 100644 index 97fdcf5a91..0000000000 --- a/typedapi/types/multibucketaggregatebasegeohexgridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoHexGridBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseGeoHexGridBucket struct { - Buckets BucketsGeoHexGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoHexGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoHexGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoHexGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err 
:= dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoHexGridBucket returns a MultiBucketAggregateBaseGeoHexGridBucket. -func NewMultiBucketAggregateBaseGeoHexGridBucket() *MultiBucketAggregateBaseGeoHexGridBucket { - r := &MultiBucketAggregateBaseGeoHexGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeotilegridbucket.go b/typedapi/types/multibucketaggregatebasegeotilegridbucket.go deleted file mode 100644 index a5eedf6934..0000000000 --- a/typedapi/types/multibucketaggregatebasegeotilegridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoTileGridBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseGeoTileGridBucket struct { - Buckets BucketsGeoTileGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoTileGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoTileGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoTileGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoTileGridBucket returns a MultiBucketAggregateBaseGeoTileGridBucket. -func NewMultiBucketAggregateBaseGeoTileGridBucket() *MultiBucketAggregateBaseGeoTileGridBucket { - r := &MultiBucketAggregateBaseGeoTileGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasehistogrambucket.go b/typedapi/types/multibucketaggregatebasehistogrambucket.go deleted file mode 100644 index 2ce3263f01..0000000000 --- a/typedapi/types/multibucketaggregatebasehistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseHistogramBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseHistogramBucket struct { - Buckets BucketsHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]HistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []HistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseHistogramBucket returns a MultiBucketAggregateBaseHistogramBucket. -func NewMultiBucketAggregateBaseHistogramBucket() *MultiBucketAggregateBaseHistogramBucket { - r := &MultiBucketAggregateBaseHistogramBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaseipprefixbucket.go b/typedapi/types/multibucketaggregatebaseipprefixbucket.go deleted file mode 100644 index af32270fc4..0000000000 --- a/typedapi/types/multibucketaggregatebaseipprefixbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseIpPrefixBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseIpPrefixBucket struct { - Buckets BucketsIpPrefixBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseIpPrefixBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]IpPrefixBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []IpPrefixBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseIpPrefixBucket returns a MultiBucketAggregateBaseIpPrefixBucket. -func NewMultiBucketAggregateBaseIpPrefixBucket() *MultiBucketAggregateBaseIpPrefixBucket { - r := &MultiBucketAggregateBaseIpPrefixBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaseiprangebucket.go b/typedapi/types/multibucketaggregatebaseiprangebucket.go deleted file mode 100644 index 4bca5ac069..0000000000 --- a/typedapi/types/multibucketaggregatebaseiprangebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseIpRangeBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseIpRangeBucket struct { - Buckets BucketsIpRangeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseIpRangeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]IpRangeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []IpRangeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseIpRangeBucket returns a MultiBucketAggregateBaseIpRangeBucket. -func NewMultiBucketAggregateBaseIpRangeBucket() *MultiBucketAggregateBaseIpRangeBucket { - r := &MultiBucketAggregateBaseIpRangeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaselongraretermsbucket.go b/typedapi/types/multibucketaggregatebaselongraretermsbucket.go deleted file mode 100644 index f33f16564c..0000000000 --- a/typedapi/types/multibucketaggregatebaselongraretermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseLongRareTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseLongRareTermsBucket struct { - Buckets BucketsLongRareTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseLongRareTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongRareTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongRareTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseLongRareTermsBucket returns a MultiBucketAggregateBaseLongRareTermsBucket. -func NewMultiBucketAggregateBaseLongRareTermsBucket() *MultiBucketAggregateBaseLongRareTermsBucket { - r := &MultiBucketAggregateBaseLongRareTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaselongtermsbucket.go b/typedapi/types/multibucketaggregatebaselongtermsbucket.go deleted file mode 100644 index 88cc87ab41..0000000000 --- a/typedapi/types/multibucketaggregatebaselongtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. 
Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case 
"meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseLongTermsBucket returns a MultiBucketAggregateBaseLongTermsBucket. -func NewMultiBucketAggregateBaseLongTermsBucket() *MultiBucketAggregateBaseLongTermsBucket { - r := &MultiBucketAggregateBaseLongTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasemultitermsbucket.go b/typedapi/types/multibucketaggregatebasemultitermsbucket.go deleted file mode 100644 index ef8a06a61f..0000000000 --- a/typedapi/types/multibucketaggregatebasemultitermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseMultiTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]MultiTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []MultiTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseMultiTermsBucket returns a MultiBucketAggregateBaseMultiTermsBucket. -func NewMultiBucketAggregateBaseMultiTermsBucket() *MultiBucketAggregateBaseMultiTermsBucket { - r := &MultiBucketAggregateBaseMultiTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaserangebucket.go b/typedapi/types/multibucketaggregatebaserangebucket.go deleted file mode 100644 index b53a0f4a79..0000000000 --- a/typedapi/types/multibucketaggregatebaserangebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseRangeBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseRangeBucket struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseRangeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]RangeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []RangeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err 
!= nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseRangeBucket returns a MultiBucketAggregateBaseRangeBucket. -func NewMultiBucketAggregateBaseRangeBucket() *MultiBucketAggregateBaseRangeBucket { - r := &MultiBucketAggregateBaseRangeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go b/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go deleted file mode 100644 index b40ce9e03e..0000000000 --- a/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseSignificantLongTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseSignificantLongTermsBucket struct { - Buckets BucketsSignificantLongTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantLongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantLongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseSignificantLongTermsBucket returns a MultiBucketAggregateBaseSignificantLongTermsBucket. -func NewMultiBucketAggregateBaseSignificantLongTermsBucket() *MultiBucketAggregateBaseSignificantLongTermsBucket { - r := &MultiBucketAggregateBaseSignificantLongTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go b/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go deleted file mode 100644 index 3373422cad..0000000000 --- a/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseSignificantStringTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseSignificantStringTermsBucket struct { - Buckets BucketsSignificantStringTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantStringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantStringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseSignificantStringTermsBucket returns a MultiBucketAggregateBaseSignificantStringTermsBucket. -func NewMultiBucketAggregateBaseSignificantStringTermsBucket() *MultiBucketAggregateBaseSignificantStringTermsBucket { - r := &MultiBucketAggregateBaseSignificantStringTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasestringraretermsbucket.go b/typedapi/types/multibucketaggregatebasestringraretermsbucket.go deleted file mode 100644 index a5bfb6a390..0000000000 --- a/typedapi/types/multibucketaggregatebasestringraretermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseStringRareTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseStringRareTermsBucket struct { - Buckets BucketsStringRareTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseStringRareTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringRareTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringRareTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseStringRareTermsBucket returns a MultiBucketAggregateBaseStringRareTermsBucket. -func NewMultiBucketAggregateBaseStringRareTermsBucket() *MultiBucketAggregateBaseStringRareTermsBucket { - r := &MultiBucketAggregateBaseStringRareTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasestringtermsbucket.go b/typedapi/types/multibucketaggregatebasestringtermsbucket.go deleted file mode 100644 index cea95d34ad..0000000000 --- a/typedapi/types/multibucketaggregatebasestringtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseStringTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseStringTermsBucket returns a MultiBucketAggregateBaseStringTermsBucket. -func NewMultiBucketAggregateBaseStringTermsBucket() *MultiBucketAggregateBaseStringTermsBucket { - r := &MultiBucketAggregateBaseStringTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasetimeseriesbucket.go b/typedapi/types/multibucketaggregatebasetimeseriesbucket.go deleted file mode 100644 index f1600c18a4..0000000000 --- a/typedapi/types/multibucketaggregatebasetimeseriesbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseTimeSeriesBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseTimeSeriesBucket struct { - Buckets BucketsTimeSeriesBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseTimeSeriesBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]TimeSeriesBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []TimeSeriesBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err 
:= dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseTimeSeriesBucket returns a MultiBucketAggregateBaseTimeSeriesBucket. -func NewMultiBucketAggregateBaseTimeSeriesBucket() *MultiBucketAggregateBaseTimeSeriesBucket { - r := &MultiBucketAggregateBaseTimeSeriesBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go b/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go deleted file mode 100644 index 599062dd69..0000000000 --- a/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseVariableWidthHistogramBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseVariableWidthHistogramBucket struct { - Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseVariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]VariableWidthHistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []VariableWidthHistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseVariableWidthHistogramBucket returns a MultiBucketAggregateBaseVariableWidthHistogramBucket. -func NewMultiBucketAggregateBaseVariableWidthHistogramBucket() *MultiBucketAggregateBaseVariableWidthHistogramBucket { - r := &MultiBucketAggregateBaseVariableWidthHistogramBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasevoid.go b/typedapi/types/multibucketaggregatebasevoid.go deleted file mode 100644 index 784516a1b3..0000000000 --- a/typedapi/types/multibucketaggregatebasevoid.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseVoid type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L357-L359 -type MultiBucketAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]any, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []any{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseVoid returns a MultiBucketAggregateBaseVoid. -func NewMultiBucketAggregateBaseVoid() *MultiBucketAggregateBaseVoid { - r := &MultiBucketAggregateBaseVoid{} - - return r -} diff --git a/typedapi/types/multigeterror.go b/typedapi/types/multigeterror.go index f1c22ff3d3..43dc635f11 100644 --- a/typedapi/types/multigeterror.go +++ b/typedapi/types/multigeterror.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MultiGetError type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mget/types.ts#L62-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mget/types.ts#L62-L66 type MultiGetError struct { Error ErrorCause `json:"error"` Id_ string `json:"_id"` @@ -78,3 +78,5 @@ func NewMultiGetError() *MultiGetError { return r } + +// false diff --git a/typedapi/types/multimatchquery.go b/typedapi/types/multimatchquery.go index 286743cfe0..2dbce04c3d 100644 --- a/typedapi/types/multimatchquery.go +++ b/typedapi/types/multimatchquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // MultiMatchQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L471-L557 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L471-L557 type MultiMatchQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. 
Analyzer *string `json:"analyzer,omitempty"` @@ -334,3 +334,13 @@ func NewMultiMatchQuery() *MultiMatchQuery { return r } + +// true + +type MultiMatchQueryVariant interface { + MultiMatchQueryCaster() *MultiMatchQuery +} + +func (s *MultiMatchQuery) MultiMatchQueryCaster() *MultiMatchQuery { + return s +} diff --git a/typedapi/types/multiplexertokenfilter.go b/typedapi/types/multiplexertokenfilter.go index b7d6ef34ad..60a12a92d2 100644 --- a/typedapi/types/multiplexertokenfilter.go +++ b/typedapi/types/multiplexertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MultiplexerTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L261-L265 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L261-L265 type MultiplexerTokenFilter struct { Filters []string `json:"filters"` PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` @@ -99,3 +99,13 @@ func NewMultiplexerTokenFilter() *MultiplexerTokenFilter { return r } + +// true + +type MultiplexerTokenFilterVariant interface { + MultiplexerTokenFilterCaster() *MultiplexerTokenFilter +} + +func (s *MultiplexerTokenFilter) MultiplexerTokenFilterCaster() *MultiplexerTokenFilter { + return s +} diff --git a/typedapi/types/multisearchbody.go b/typedapi/types/multisearchbody.go index b76f82ef5b..bdfa18000f 100644 --- a/typedapi/types/multisearchbody.go +++ b/typedapi/types/multisearchbody.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MultisearchBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L70-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/types.ts#L70-L204 type MultisearchBody struct { Aggregations map[string]Aggregations `json:"aggregations,omitempty"` Collapse *FieldCollapse `json:"collapse,omitempty"` @@ -478,10 +478,20 @@ func (s *MultisearchBody) UnmarshalJSON(data []byte) error { // NewMultisearchBody returns a MultisearchBody. func NewMultisearchBody() *MultisearchBody { r := &MultisearchBody{ - Aggregations: make(map[string]Aggregations, 0), - Ext: make(map[string]json.RawMessage, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + Ext: make(map[string]json.RawMessage), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type MultisearchBodyVariant interface { + MultisearchBodyCaster() *MultisearchBody +} + +func (s *MultisearchBody) MultisearchBodyCaster() *MultisearchBody { + return s +} diff --git a/typedapi/types/multisearchheader.go b/typedapi/types/multisearchheader.go index 007f34b359..1deaa1e448 100644 --- a/typedapi/types/multisearchheader.go +++ b/typedapi/types/multisearchheader.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // MultisearchHeader type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L52-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/types.ts#L52-L67 type MultisearchHeader struct { AllowNoIndices *bool `json:"allow_no_indices,omitempty"` AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` @@ -213,3 +213,13 @@ func NewMultisearchHeader() *MultisearchHeader { return r } + +// true + +type MultisearchHeaderVariant interface { + MultisearchHeaderCaster() *MultisearchHeader +} + +func (s *MultisearchHeader) MultisearchHeaderCaster() *MultisearchHeader { + return s +} diff --git a/typedapi/types/multisearchitem.go b/typedapi/types/multisearchitem.go index 601e5d7b74..0dc87e107d 100644 --- a/typedapi/types/multisearchitem.go +++ b/typedapi/types/multisearchitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,23 +32,47 @@ import ( // MultiSearchItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L216-L219 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch/types.ts#L216-L219 type MultiSearchItem struct { - Aggregations map[string]Aggregate `json:"aggregations,omitempty"` - Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits HitsMetadata `json:"hits"` - MaxScore *Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` - Shards_ ShardStatistics `json:"_shards"` - Status *int `json:"status,omitempty"` - Suggest map[string][]Suggest `json:"suggest,omitempty"` - TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + Aggregations map[string]Aggregate `json:"aggregations,omitempty"` + Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits HitsMetadata `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. 
+ Shards_ ShardStatistics `json:"_shards"` + Status *int `json:"status,omitempty"` + Suggest map[string][]Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { @@ -797,10 +821,12 @@ func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { // NewMultiSearchItem returns a MultiSearchItem. func NewMultiSearchItem() *MultiSearchItem { r := &MultiSearchItem{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), + Aggregations: make(map[string]Aggregate), + Fields: make(map[string]json.RawMessage), + Suggest: make(map[string][]Suggest), } return r } + +// false diff --git a/typedapi/types/multisearchresult.go b/typedapi/types/multisearchresult.go deleted file mode 100644 index f6c757f6b3..0000000000 --- a/typedapi/types/multisearchresult.go +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// MultiSearchResult type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch/types.ts#L206-L209 -type MultiSearchResult struct { - Responses []MsearchResponseItem `json:"responses"` - Took int64 `json:"took"` -} - -func (s *MultiSearchResult) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "responses": - messageArray := []json.RawMessage{} - if err := dec.Decode(&messageArray); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - responses_field: - for _, message := range messageArray { - keyDec := json.NewDecoder(bytes.NewReader(message)) - for { - t, err := keyDec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return fmt.Errorf("%s | %w", "Responses", err) - } - - switch t { - - case "aggregations", "_clusters", "fields", "hits", "max_score", "num_reduce_phases", "pit_id", "profile", "_scroll_id", "_shards", "suggest", "terminated_early", "timed_out", "took": - o := NewMultiSearchItem() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - continue responses_field - - case "error": - o := NewErrorResponseBase() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - continue responses_field - - } - } - - var o any - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - } - - case "took": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := 
strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Took", err) - } - s.Took = value - case float64: - f := int64(v) - s.Took = f - } - - } - } - return nil -} - -// NewMultiSearchResult returns a MultiSearchResult. -func NewMultiSearchResult() *MultiSearchResult { - r := &MultiSearchResult{} - - return r -} diff --git a/typedapi/types/multitermlookup.go b/typedapi/types/multitermlookup.go index 634fdbffde..7beb32b1ea 100644 --- a/typedapi/types/multitermlookup.go +++ b/typedapi/types/multitermlookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // MultiTermLookup type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L643-L653 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L643-L653 type MultiTermLookup struct { // Field A fields from which to retrieve terms. Field string `json:"field"` @@ -75,3 +75,13 @@ func NewMultiTermLookup() *MultiTermLookup { return r } + +// true + +type MultiTermLookupVariant interface { + MultiTermLookupCaster() *MultiTermLookup +} + +func (s *MultiTermLookup) MultiTermLookupCaster() *MultiTermLookup { + return s +} diff --git a/typedapi/types/multitermsaggregate.go b/typedapi/types/multitermsaggregate.go index c9de6815d7..58931755c3 100644 --- a/typedapi/types/multitermsaggregate.go +++ b/typedapi/types/multitermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MultiTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L501-L506 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L501-L506 type MultiTermsAggregate struct { Buckets BucketsMultiTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewMultiTermsAggregate() *MultiTermsAggregate { return r } + +// false diff --git a/typedapi/types/multitermsaggregation.go b/typedapi/types/multitermsaggregation.go index 532443f39d..468a87b5eb 100644 --- a/typedapi/types/multitermsaggregation.go +++ b/typedapi/types/multitermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // MultiTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L601-L641 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L601-L641 type MultiTermsAggregation struct { // CollectMode Specifies the strategy for data collection. 
CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` @@ -191,3 +191,13 @@ func NewMultiTermsAggregation() *MultiTermsAggregation { return r } + +// true + +type MultiTermsAggregationVariant interface { + MultiTermsAggregationCaster() *MultiTermsAggregation +} + +func (s *MultiTermsAggregation) MultiTermsAggregationCaster() *MultiTermsAggregation { + return s +} diff --git a/typedapi/types/multitermsbucket.go b/typedapi/types/multitermsbucket.go index 9acba61c0a..b267ecc7bd 100644 --- a/typedapi/types/multitermsbucket.go +++ b/typedapi/types/multitermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // MultiTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L508-L512 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L508-L512 type MultiTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -660,8 +660,10 @@ func (s MultiTermsBucket) MarshalJSON() ([]byte, error) { // NewMultiTermsBucket returns a MultiTermsBucket. 
func NewMultiTermsBucket() *MultiTermsBucket { r := &MultiTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/murmur3hashproperty.go b/typedapi/types/murmur3hashproperty.go index b2698c46a3..185b561795 100644 --- a/typedapi/types/murmur3hashproperty.go +++ b/typedapi/types/murmur3hashproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // Murmur3HashProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L81-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L90-L92 type Murmur3HashProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,10 +42,11 @@ type Murmur3HashProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { @@ -116,301 +118,313 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -459,301 +473,313 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -773,6 +799,11 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -787,15 +818,16 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { type innerMurmur3HashProperty Murmur3HashProperty tmp := innerMurmur3HashProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "murmur3" @@ -806,10 +838,20 @@ func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { // NewMurmur3HashProperty returns a Murmur3HashProperty. 
func NewMurmur3HashProperty() *Murmur3HashProperty { r := &Murmur3HashProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type Murmur3HashPropertyVariant interface { + Murmur3HashPropertyCaster() *Murmur3HashProperty +} + +func (s *Murmur3HashProperty) Murmur3HashPropertyCaster() *Murmur3HashProperty { + return s +} diff --git a/typedapi/types/mutualinformationheuristic.go b/typedapi/types/mutualinformationheuristic.go index 03a422f27d..cc6cf44950 100644 --- a/typedapi/types/mutualinformationheuristic.go +++ b/typedapi/types/mutualinformationheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // MutualInformationHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L800-L809 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L800-L809 type MutualInformationHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. 
@@ -95,3 +95,13 @@ func NewMutualInformationHeuristic() *MutualInformationHeuristic { return r } + +// true + +type MutualInformationHeuristicVariant interface { + MutualInformationHeuristicCaster() *MutualInformationHeuristic +} + +func (s *MutualInformationHeuristic) MutualInformationHeuristicCaster() *MutualInformationHeuristic { + return s +} diff --git a/typedapi/types/names.go b/typedapi/types/names.go index 29dcbde055..b4e2ef69c1 100644 --- a/typedapi/types/names.go +++ b/typedapi/types/names.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Names type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L81-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L75-L75 type Names []string + +type NamesVariant interface { + NamesCaster() *Names +} diff --git a/typedapi/types/nativecode.go b/typedapi/types/nativecode.go index 7ff31bedac..663f5d4ab9 100644 --- a/typedapi/types/nativecode.go +++ b/typedapi/types/nativecode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NativeCode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/info/types.ts#L29-L32 type NativeCode struct { BuildHash string `json:"build_hash"` Version string `json:"version"` @@ -80,3 +80,5 @@ func NewNativeCode() *NativeCode { return r } + +// false diff --git a/typedapi/types/nativecodeinformation.go b/typedapi/types/nativecodeinformation.go index dae9cbd741..7a51abcb9d 100644 --- a/typedapi/types/nativecodeinformation.go +++ b/typedapi/types/nativecodeinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NativeCodeInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/types.ts#L29-L32 type NativeCodeInformation struct { BuildHash string `json:"build_hash"` Version string `json:"version"` @@ -80,3 +80,5 @@ func NewNativeCodeInformation() *NativeCodeInformation { return r } + +// false diff --git a/typedapi/types/nerinferenceoptions.go b/typedapi/types/nerinferenceoptions.go index b3e61bf5d4..b1e7d6b8ce 100644 --- a/typedapi/types/nerinferenceoptions.go +++ b/typedapi/types/nerinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NerInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L255-L264 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L242-L251 type NerInferenceOptions struct { // ClassificationLabels The token classification labels. Must be IOB formatted tags ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -96,3 +96,13 @@ func NewNerInferenceOptions() *NerInferenceOptions { return r } + +// true + +type NerInferenceOptionsVariant interface { + NerInferenceOptionsCaster() *NerInferenceOptions +} + +func (s *NerInferenceOptions) NerInferenceOptionsCaster() *NerInferenceOptions { + return s +} diff --git a/typedapi/types/nerinferenceupdateoptions.go b/typedapi/types/nerinferenceupdateoptions.go index e71d23438f..a327b1ad8c 100644 --- a/typedapi/types/nerinferenceupdateoptions.go +++ b/typedapi/types/nerinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NerInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L404-L409 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L392-L397 type NerInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -83,3 +83,13 @@ func NewNerInferenceUpdateOptions() *NerInferenceUpdateOptions { return r } + +// true + +type NerInferenceUpdateOptionsVariant interface { + NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions +} + +func (s *NerInferenceUpdateOptions) NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/nestedaggregate.go b/typedapi/types/nestedaggregate.go index 7a7abfad08..95bf3e2f50 100644 --- a/typedapi/types/nestedaggregate.go +++ b/typedapi/types/nestedaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // NestedAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L534-L538 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L534-L538 type NestedAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s NestedAggregate) MarshalJSON() ([]byte, error) { // NewNestedAggregate returns a NestedAggregate. 
func NewNestedAggregate() *NestedAggregate { r := &NestedAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/nestedaggregation.go b/typedapi/types/nestedaggregation.go index 7532ebfcc9..e5c2bd6e46 100644 --- a/typedapi/types/nestedaggregation.go +++ b/typedapi/types/nestedaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NestedAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L655-L660 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L655-L660 type NestedAggregation struct { // Path The path to the field of type `nested`. Path *string `json:"path,omitempty"` @@ -67,3 +67,13 @@ func NewNestedAggregation() *NestedAggregation { return r } + +// true + +type NestedAggregationVariant interface { + NestedAggregationCaster() *NestedAggregation +} + +func (s *NestedAggregation) NestedAggregationCaster() *NestedAggregation { + return s +} diff --git a/typedapi/types/nestedidentity.go b/typedapi/types/nestedidentity.go index 2f770374e9..d97a422886 100644 --- a/typedapi/types/nestedidentity.go +++ b/typedapi/types/nestedidentity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NestedIdentity type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L90-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L89-L93 type NestedIdentity struct { Field string `json:"field"` Nested_ *NestedIdentity `json:"_nested,omitempty"` @@ -90,3 +90,5 @@ func NewNestedIdentity() *NestedIdentity { return r } + +// false diff --git a/typedapi/types/nestedproperty.go b/typedapi/types/nestedproperty.go index 79b800f18f..bea7db11e6 100644 --- a/typedapi/types/nestedproperty.go +++ b/typedapi/types/nestedproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // NestedProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/complex.ts#L38-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/complex.ts#L39-L44 type NestedProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -43,10 +44,11 @@ type NestedProperty struct { IncludeInParent *bool `json:"include_in_parent,omitempty"` IncludeInRoot *bool `json:"include_in_root,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *NestedProperty) UnmarshalJSON(data []byte) error { @@ -118,301 +120,313 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo 
:= NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -489,301 +503,313 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -803,6 +829,11 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -817,17 +848,18 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { func (s NestedProperty) MarshalJSON() ([]byte, error) { type innerNestedProperty NestedProperty tmp := innerNestedProperty{ - CopyTo: s.CopyTo, - Dynamic: s.Dynamic, - Enabled: s.Enabled, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IncludeInParent: s.IncludeInParent, - IncludeInRoot: s.IncludeInRoot, - Meta: s.Meta, - 
Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IncludeInParent: s.IncludeInParent, + IncludeInRoot: s.IncludeInRoot, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "nested" @@ -838,10 +870,20 @@ func (s NestedProperty) MarshalJSON() ([]byte, error) { // NewNestedProperty returns a NestedProperty. func NewNestedProperty() *NestedProperty { r := &NestedProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type NestedPropertyVariant interface { + NestedPropertyCaster() *NestedProperty +} + +func (s *NestedProperty) NestedPropertyCaster() *NestedProperty { + return s +} diff --git a/typedapi/types/nestedquery.go b/typedapi/types/nestedquery.go index 0ef27775d4..6963bf9e54 100644 --- a/typedapi/types/nestedquery.go +++ b/typedapi/types/nestedquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NestedQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/joining.ts#L112-L139 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/joining.ts#L112-L139 type NestedQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -49,7 +49,7 @@ type NestedQuery struct { // Path Path to the nested object you wish to search. Path string `json:"path"` // Query Query you wish to run on nested objects in the path. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // ScoreMode How scores for matching child objects affect the root parent document’s // relevance score. @@ -144,3 +144,13 @@ func NewNestedQuery() *NestedQuery { return r } + +// true + +type NestedQueryVariant interface { + NestedQueryCaster() *NestedQuery +} + +func (s *NestedQuery) NestedQueryCaster() *NestedQuery { + return s +} diff --git a/typedapi/types/nestedsortvalue.go b/typedapi/types/nestedsortvalue.go index 5ba57f980a..67c51a4b42 100644 --- a/typedapi/types/nestedsortvalue.go +++ b/typedapi/types/nestedsortvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NestedSortValue type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L29-L34 type NestedSortValue struct { Filter *Query `json:"filter,omitempty"` MaxChildren *int `json:"max_children,omitempty"` @@ -96,3 +96,13 @@ func NewNestedSortValue() *NestedSortValue { return r } + +// true + +type NestedSortValueVariant interface { + NestedSortValueCaster() *NestedSortValue +} + +func (s *NestedSortValue) NestedSortValueCaster() *NestedSortValue { + return s +} diff --git a/typedapi/types/networkdirectionprocessor.go b/typedapi/types/networkdirectionprocessor.go index 7410f048bb..88aee8e0cd 100644 --- a/typedapi/types/networkdirectionprocessor.go +++ b/typedapi/types/networkdirectionprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NetworkDirectionProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1190-L1224 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1231-L1265 type NetworkDirectionProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type NetworkDirectionProcessor struct { // DestinationIp Field containing the destination IP address. 
DestinationIp *string `json:"destination_ip,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If true and any required fields are missing, the processor quietly exits @@ -97,16 +97,9 @@ func (s *NetworkDirectionProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -184,3 +177,13 @@ func NewNetworkDirectionProcessor() *NetworkDirectionProcessor { return r } + +// true + +type NetworkDirectionProcessorVariant interface { + NetworkDirectionProcessorCaster() *NetworkDirectionProcessor +} + +func (s *NetworkDirectionProcessor) NetworkDirectionProcessorCaster() *NetworkDirectionProcessor { + return s +} diff --git a/typedapi/types/nevercondition.go b/typedapi/types/nevercondition.go index 4bc72e6ba5..41fb3e290c 100644 --- a/typedapi/types/nevercondition.go +++ b/typedapi/types/nevercondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NeverCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L72-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L72-L72 type NeverCondition struct { } @@ -32,3 +32,13 @@ func NewNeverCondition() *NeverCondition { return r } + +// true + +type NeverConditionVariant interface { + NeverConditionCaster() *NeverCondition +} + +func (s *NeverCondition) NeverConditionCaster() *NeverCondition { + return s +} diff --git a/typedapi/types/ngramtokenfilter.go b/typedapi/types/ngramtokenfilter.go index 48d0ea36c4..e3d031517b 100644 --- a/typedapi/types/ngramtokenfilter.go +++ b/typedapi/types/ngramtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NGramTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L267-L272 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L267-L272 type NGramTokenFilter struct { MaxGram *int `json:"max_gram,omitempty"` MinGram *int `json:"min_gram,omitempty"` @@ -129,3 +129,13 @@ func NewNGramTokenFilter() *NGramTokenFilter { return r } + +// true + +type NGramTokenFilterVariant interface { + NGramTokenFilterCaster() *NGramTokenFilter +} + +func (s *NGramTokenFilter) NGramTokenFilterCaster() *NGramTokenFilter { + return s +} diff --git a/typedapi/types/ngramtokenizer.go b/typedapi/types/ngramtokenizer.go index 7377528598..be0a64e780 100644 --- a/typedapi/types/ngramtokenizer.go +++ b/typedapi/types/ngramtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L84-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L84-L93 type NGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` MaxGram *int `json:"max_gram,omitempty"` @@ -145,3 +145,13 @@ func NewNGramTokenizer() *NGramTokenizer { return r } + +// true + +type NGramTokenizerVariant interface { + NGramTokenizerCaster() *NGramTokenizer +} + +func (s *NGramTokenizer) NGramTokenizerCaster() *NGramTokenizer { + return s +} diff --git a/typedapi/types/nlpberttokenizationconfig.go b/typedapi/types/nlpberttokenizationconfig.go index 9c09204ef5..e1815fb418 100644 --- a/typedapi/types/nlpberttokenizationconfig.go +++ b/typedapi/types/nlpberttokenizationconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NlpBertTokenizationConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L131-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L161-L162 type NlpBertTokenizationConfig struct { // DoLowerCase Should the tokenizer lower case the text DoLowerCase *bool `json:"do_lower_case,omitempty"` @@ -140,3 +140,13 @@ func NewNlpBertTokenizationConfig() *NlpBertTokenizationConfig { return r } + +// true + +type NlpBertTokenizationConfigVariant interface { + NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig +} + +func (s *NlpBertTokenizationConfig) NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig { + return s +} diff --git a/typedapi/types/nlprobertatokenizationconfig.go b/typedapi/types/nlprobertatokenizationconfig.go index 733e0e6b2b..6d37d2eff1 100644 --- a/typedapi/types/nlprobertatokenizationconfig.go +++ b/typedapi/types/nlprobertatokenizationconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,10 +33,12 @@ import ( // NlpRobertaTokenizationConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L160-L187 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L164-L171 type NlpRobertaTokenizationConfig struct { // AddPrefixSpace Should the tokenizer prefix input with a space character AddPrefixSpace *bool `json:"add_prefix_space,omitempty"` + // DoLowerCase Should the tokenizer lower case the text + DoLowerCase *bool `json:"do_lower_case,omitempty"` // MaxSequenceLength Maximum input sequence length for the model MaxSequenceLength *int `json:"max_sequence_length,omitempty"` // Span Tokenization spanning options. Special value of -1 indicates no spanning @@ -78,6 +80,20 @@ func (s *NlpRobertaTokenizationConfig) UnmarshalJSON(data []byte) error { s.AddPrefixSpace = &v } + case "do_lower_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DoLowerCase", err) + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + case "max_sequence_length": var tmp any @@ -140,3 +156,13 @@ func NewNlpRobertaTokenizationConfig() *NlpRobertaTokenizationConfig { return r } + +// true + +type NlpRobertaTokenizationConfigVariant interface { + NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig +} + +func (s *NlpRobertaTokenizationConfig) NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig { + return s +} diff --git a/typedapi/types/nlptokenizationupdateoptions.go b/typedapi/types/nlptokenizationupdateoptions.go index 13ff441c6a..171a5f7c0d 100644 --- a/typedapi/types/nlptokenizationupdateoptions.go +++ b/typedapi/types/nlptokenizationupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NlpTokenizationUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L356-L361 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L344-L349 type NlpTokenizationUpdateOptions struct { // Span Span options to apply Span *int `json:"span,omitempty"` @@ -88,3 +88,13 @@ func NewNlpTokenizationUpdateOptions() *NlpTokenizationUpdateOptions { return r } + +// true + +type NlpTokenizationUpdateOptionsVariant interface { + NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions +} + +func (s *NlpTokenizationUpdateOptions) NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions { + return s +} diff --git a/typedapi/types/node.go b/typedapi/types/node.go index 0cb5dbe2b3..3831f56832 100644 --- a/typedapi/types/node.go +++ b/typedapi/types/node.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Node type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 type Node struct { SharedCache Shared `json:"shared_cache"` } @@ -33,3 +33,5 @@ func NewNode() *Node { return r } + +// false diff --git a/typedapi/types/nodeallocationexplanation.go b/typedapi/types/nodeallocationexplanation.go index d691212656..305cdf1324 100644 --- a/typedapi/types/nodeallocationexplanation.go +++ b/typedapi/types/nodeallocationexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // NodeAllocationExplanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L103-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L103-L117 type NodeAllocationExplanation struct { Deciders []AllocationDecision `json:"deciders"` NodeAttributes map[string]string `json:"node_attributes"` @@ -129,8 +129,10 @@ func (s *NodeAllocationExplanation) UnmarshalJSON(data []byte) error { // NewNodeAllocationExplanation returns a NodeAllocationExplanation. 
func NewNodeAllocationExplanation() *NodeAllocationExplanation { r := &NodeAllocationExplanation{ - NodeAttributes: make(map[string]string, 0), + NodeAttributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeattributes.go b/typedapi/types/nodeattributes.go index 350f14ff37..e63a578518 100644 --- a/typedapi/types/nodeattributes.go +++ b/typedapi/types/nodeattributes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeAttributes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Node.ts#L41-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Node.ts#L41-L52 type NodeAttributes struct { // Attributes Lists node attributes. Attributes map[string]string `json:"attributes"` @@ -95,8 +95,10 @@ func (s *NodeAttributes) UnmarshalJSON(data []byte) error { // NewNodeAttributes returns a NodeAttributes. func NewNodeAttributes() *NodeAttributes { r := &NodeAttributes{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeattributesrecord.go b/typedapi/types/nodeattributesrecord.go index a21e95afee..e0fdbb76cd 100644 --- a/typedapi/types/nodeattributesrecord.go +++ b/typedapi/types/nodeattributesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeAttributesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/nodeattrs/types.ts#L20-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/nodeattrs/types.ts#L20-L55 type NodeAttributesRecord struct { // Attr The attribute name. Attr *string `json:"attr,omitempty"` @@ -173,3 +173,5 @@ func NewNodeAttributesRecord() *NodeAttributesRecord { return r } + +// false diff --git a/typedapi/types/nodebufferpool.go b/typedapi/types/nodebufferpool.go index a542b93b87..6bd2c63618 100644 --- a/typedapi/types/nodebufferpool.go +++ b/typedapi/types/nodebufferpool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeBufferPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L859-L880 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L859-L880 type NodeBufferPool struct { // Count Number of buffer pools. 
Count *int64 `json:"count,omitempty"` @@ -140,3 +140,5 @@ func NewNodeBufferPool() *NodeBufferPool { return r } + +// false diff --git a/typedapi/types/nodediskusage.go b/typedapi/types/nodediskusage.go index 8e70bd5b22..d1dd084b47 100644 --- a/typedapi/types/nodediskusage.go +++ b/typedapi/types/nodediskusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeDiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L57-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L57-L61 type NodeDiskUsage struct { LeastAvailable DiskUsage `json:"least_available"` MostAvailable DiskUsage `json:"most_available"` @@ -78,3 +78,5 @@ func NewNodeDiskUsage() *NodeDiskUsage { return r } + +// false diff --git a/typedapi/types/nodeids.go b/typedapi/types/nodeids.go index f3a9efe5ae..25d704c72c 100644 --- a/typedapi/types/nodeids.go +++ b/typedapi/types/nodeids.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeIds type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L64-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L58-L58 type NodeIds []string diff --git a/typedapi/types/nodeinfo.go b/typedapi/types/nodeinfo.go index bd757ef2fc..5b212c63bc 100644 --- a/typedapi/types/nodeinfo.go +++ b/typedapi/types/nodeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NodeInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L31-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L31-L67 type NodeInfo struct { Aggregations map[string]NodeInfoAggregation `json:"aggregations,omitempty"` Attributes map[string]string `json:"attributes"` @@ -254,10 +254,12 @@ func (s *NodeInfo) UnmarshalJSON(data []byte) error { // NewNodeInfo returns a NodeInfo. 
func NewNodeInfo() *NodeInfo { r := &NodeInfo{ - Aggregations: make(map[string]NodeInfoAggregation, 0), - Attributes: make(map[string]string, 0), - ThreadPool: make(map[string]NodeThreadPoolInfo, 0), + Aggregations: make(map[string]NodeInfoAggregation), + Attributes: make(map[string]string), + ThreadPool: make(map[string]NodeThreadPoolInfo), } return r } + +// false diff --git a/typedapi/types/nodeinfoaction.go b/typedapi/types/nodeinfoaction.go index e40642a9b8..a14669cfc8 100644 --- a/typedapi/types/nodeinfoaction.go +++ b/typedapi/types/nodeinfoaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L184-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L184-L186 type NodeInfoAction struct { DestructiveRequiresName string `json:"destructive_requires_name"` } @@ -74,3 +74,5 @@ func NewNodeInfoAction() *NodeInfoAction { return r } + +// false diff --git a/typedapi/types/nodeinfoaggregation.go b/typedapi/types/nodeinfoaggregation.go index 40f25d93ef..7cbd6ad3f4 100644 --- a/typedapi/types/nodeinfoaggregation.go +++ b/typedapi/types/nodeinfoaggregation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L235-L237 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L235-L237 type NodeInfoAggregation struct { Types []string `json:"types"` } @@ -33,3 +33,5 @@ func NewNodeInfoAggregation() *NodeInfoAggregation { return r } + +// false diff --git a/typedapi/types/nodeinfobootstrap.go b/typedapi/types/nodeinfobootstrap.go index 1603cda763..915f01db36 100644 --- a/typedapi/types/nodeinfobootstrap.go +++ b/typedapi/types/nodeinfobootstrap.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoBootstrap type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L204-L206 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L204-L206 type NodeInfoBootstrap struct { MemoryLock string `json:"memory_lock"` } @@ -74,3 +74,5 @@ func NewNodeInfoBootstrap() *NodeInfoBootstrap { return r } + +// false diff --git a/typedapi/types/nodeinfoclient.go b/typedapi/types/nodeinfoclient.go index d15c016d43..e837870d34 100644 --- a/typedapi/types/nodeinfoclient.go +++ b/typedapi/types/nodeinfoclient.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoClient type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L188-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L188-L190 type NodeInfoClient struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoClient() *NodeInfoClient { return r } + +// false diff --git a/typedapi/types/nodeinfodiscover.go b/typedapi/types/nodeinfodiscover.go index e81e7093ba..3225287e70 100644 --- a/typedapi/types/nodeinfodiscover.go +++ b/typedapi/types/nodeinfodiscover.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoDiscover type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L173-L182 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L173-L182 type NodeInfoDiscover struct { NodeInfoDiscover map[string]json.RawMessage `json:"-"` SeedHosts []string `json:"seed_hosts,omitempty"` @@ -126,8 +126,10 @@ func (s NodeInfoDiscover) MarshalJSON() ([]byte, error) { // NewNodeInfoDiscover returns a NodeInfoDiscover. func NewNodeInfoDiscover() *NodeInfoDiscover { r := &NodeInfoDiscover{ - NodeInfoDiscover: make(map[string]json.RawMessage, 0), + NodeInfoDiscover: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfohttp.go b/typedapi/types/nodeinfohttp.go index 743942361d..a069f53f4c 100644 --- a/typedapi/types/nodeinfohttp.go +++ b/typedapi/types/nodeinfohttp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoHttp type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L311-L316 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L311-L316 type NodeInfoHttp struct { BoundAddress []string `json:"bound_address"` MaxContentLength ByteSize `json:"max_content_length,omitempty"` @@ -102,3 +102,5 @@ func NewNodeInfoHttp() *NodeInfoHttp { return r } + +// false diff --git a/typedapi/types/nodeinfoingest.go b/typedapi/types/nodeinfoingest.go index 689e9daecb..42725d378b 100644 --- a/typedapi/types/nodeinfoingest.go +++ b/typedapi/types/nodeinfoingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoIngest type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L227-L229 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L227-L229 type NodeInfoIngest struct { Processors []NodeInfoIngestProcessor `json:"processors"` } @@ -33,3 +33,5 @@ func NewNodeInfoIngest() *NodeInfoIngest { return r } + +// false diff --git a/typedapi/types/nodeinfoingestdownloader.go b/typedapi/types/nodeinfoingestdownloader.go index 5b2af8bc6d..9e95264ab1 100644 --- a/typedapi/types/nodeinfoingestdownloader.go +++ b/typedapi/types/nodeinfoingestdownloader.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoIngestDownloader type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L128-L130 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L128-L130 type NodeInfoIngestDownloader struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewNodeInfoIngestDownloader() *NodeInfoIngestDownloader { return r } + +// false diff --git a/typedapi/types/nodeinfoingestinfo.go b/typedapi/types/nodeinfoingestinfo.go index 71823f0a42..4b27765711 100644 --- a/typedapi/types/nodeinfoingestinfo.go +++ b/typedapi/types/nodeinfoingestinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoIngestInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L124-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L124-L126 type NodeInfoIngestInfo struct { Downloader NodeInfoIngestDownloader `json:"downloader"` } @@ -33,3 +33,5 @@ func NewNodeInfoIngestInfo() *NodeInfoIngestInfo { return r } + +// false diff --git a/typedapi/types/nodeinfoingestprocessor.go b/typedapi/types/nodeinfoingestprocessor.go index 0dc3065374..c4acefc752 100644 --- a/typedapi/types/nodeinfoingestprocessor.go +++ b/typedapi/types/nodeinfoingestprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoIngestProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L231-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L231-L233 type NodeInfoIngestProcessor struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoIngestProcessor() *NodeInfoIngestProcessor { return r } + +// false diff --git a/typedapi/types/nodeinfojvmmemory.go b/typedapi/types/nodeinfojvmmemory.go index 3bdbae10da..8de453e592 100644 --- a/typedapi/types/nodeinfojvmmemory.go +++ b/typedapi/types/nodeinfojvmmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoJvmMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L318-L329 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L318-L329 type NodeInfoJvmMemory struct { DirectMax ByteSize `json:"direct_max,omitempty"` DirectMaxInBytes int64 `json:"direct_max_in_bytes"` @@ -171,3 +171,5 @@ func NewNodeInfoJvmMemory() *NodeInfoJvmMemory { return r } + +// false diff --git a/typedapi/types/nodeinfomemory.go b/typedapi/types/nodeinfomemory.go index 2c9069e0aa..c90cc0b6a5 100644 --- a/typedapi/types/nodeinfomemory.go +++ b/typedapi/types/nodeinfomemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L331-L334 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L331-L334 type NodeInfoMemory struct { Total string `json:"total"` TotalInBytes int64 `json:"total_in_bytes"` @@ -90,3 +90,5 @@ func NewNodeInfoMemory() *NodeInfoMemory { return r } + +// false diff --git a/typedapi/types/nodeinfonetwork.go b/typedapi/types/nodeinfonetwork.go index 43052033b1..1e783151d8 100644 --- a/typedapi/types/nodeinfonetwork.go +++ b/typedapi/types/nodeinfonetwork.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoNetwork type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L336-L339 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L336-L339 type NodeInfoNetwork struct { PrimaryInterface NodeInfoNetworkInterface `json:"primary_interface"` RefreshInterval int `json:"refresh_interval"` @@ -84,3 +84,5 @@ func NewNodeInfoNetwork() *NodeInfoNetwork { return r } + +// false diff --git a/typedapi/types/nodeinfonetworkinterface.go b/typedapi/types/nodeinfonetworkinterface.go index e9cebbaf42..de65dd5e73 100644 --- a/typedapi/types/nodeinfonetworkinterface.go +++ b/typedapi/types/nodeinfonetworkinterface.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoNetworkInterface type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L341-L345 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L341-L345 type NodeInfoNetworkInterface struct { Address string `json:"address"` MacAddress string `json:"mac_address"` @@ -93,3 +93,5 @@ func NewNodeInfoNetworkInterface() *NodeInfoNetworkInterface { return r } + +// false diff --git a/typedapi/types/nodeinfooscpu.go b/typedapi/types/nodeinfooscpu.go index dbdf0557fd..2e829e47f4 100644 --- a/typedapi/types/nodeinfooscpu.go +++ b/typedapi/types/nodeinfooscpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoOSCPU type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L347-L356 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L347-L356 type NodeInfoOSCPU struct { CacheSize string `json:"cache_size"` CacheSizeInBytes int `json:"cache_size_in_bytes"` @@ -185,3 +185,5 @@ func NewNodeInfoOSCPU() *NodeInfoOSCPU { return r } + +// false diff --git a/typedapi/types/nodeinfopath.go b/typedapi/types/nodeinfopath.go index 65390af614..6ee3b4a515 100644 --- a/typedapi/types/nodeinfopath.go +++ b/typedapi/types/nodeinfopath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoPath type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L158-L163 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L158-L163 type NodeInfoPath struct { Data []string `json:"data,omitempty"` Home *string `json:"home,omitempty"` @@ -55,8 +55,19 @@ func (s *NodeInfoPath) UnmarshalJSON(data []byte) error { switch t { case "data": - if err := dec.Decode(&s.Data); err != nil { - return fmt.Errorf("%s | %w", "Data", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + s.Data = append(s.Data, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } } case "home": @@ -99,3 +110,5 @@ func NewNodeInfoPath() *NodeInfoPath { return r } + +// false diff --git a/typedapi/types/nodeinforepositories.go b/typedapi/types/nodeinforepositories.go index 3cced34665..9bc40ae68c 100644 --- a/typedapi/types/nodeinforepositories.go +++ b/typedapi/types/nodeinforepositories.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoRepositories type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L165-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L165-L167 type NodeInfoRepositories struct { Url NodeInfoRepositoriesUrl `json:"url"` } @@ -33,3 +33,5 @@ func NewNodeInfoRepositories() *NodeInfoRepositories { return r } + +// false diff --git a/typedapi/types/nodeinforepositoriesurl.go b/typedapi/types/nodeinforepositoriesurl.go index 4b337f239a..cc84e2b70d 100644 --- a/typedapi/types/nodeinforepositoriesurl.go +++ b/typedapi/types/nodeinforepositoriesurl.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoRepositoriesUrl type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L169-L171 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L169-L171 type NodeInfoRepositoriesUrl struct { AllowedUrls string `json:"allowed_urls"` } @@ -74,3 +74,5 @@ func NewNodeInfoRepositoriesUrl() *NodeInfoRepositoriesUrl { return r } + +// false diff --git a/typedapi/types/nodeinfoscript.go b/typedapi/types/nodeinfoscript.go index 96d3585b0d..3e420358f5 100644 --- a/typedapi/types/nodeinfoscript.go +++ b/typedapi/types/nodeinfoscript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L289-L292 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L289-L292 type NodeInfoScript struct { AllowedTypes string `json:"allowed_types"` DisableMaxCompilationsRate *string `json:"disable_max_compilations_rate,omitempty"` @@ -87,3 +87,5 @@ func NewNodeInfoScript() *NodeInfoScript { return r } + +// false diff --git a/typedapi/types/nodeinfosearch.go b/typedapi/types/nodeinfosearch.go index 3b8e228fce..bdbeac6e56 100644 --- a/typedapi/types/nodeinfosearch.go +++ b/typedapi/types/nodeinfosearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L294-L296 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L294-L296 type NodeInfoSearch struct { Remote NodeInfoSearchRemote `json:"remote"` } @@ -33,3 +33,5 @@ func NewNodeInfoSearch() *NodeInfoSearch { return r } + +// false diff --git a/typedapi/types/nodeinfosearchremote.go b/typedapi/types/nodeinfosearchremote.go index c03b4665ea..03a8a4b7b8 100644 --- a/typedapi/types/nodeinfosearchremote.go +++ b/typedapi/types/nodeinfosearchremote.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSearchRemote type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L298-L300 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L298-L300 type NodeInfoSearchRemote struct { Connect string `json:"connect"` } @@ -74,3 +74,5 @@ func NewNodeInfoSearchRemote() *NodeInfoSearchRemote { return r } + +// false diff --git a/typedapi/types/nodeinfosettings.go b/typedapi/types/nodeinfosettings.go index c227248016..ca9541b5f4 100644 --- a/typedapi/types/nodeinfosettings.go +++ b/typedapi/types/nodeinfosettings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L69-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L69-L85 type NodeInfoSettings struct { Action *NodeInfoAction `json:"action,omitempty"` Bootstrap *NodeInfoBootstrap `json:"bootstrap,omitempty"` @@ -47,3 +47,5 @@ func NewNodeInfoSettings() *NodeInfoSettings { return r } + +// false diff --git a/typedapi/types/nodeinfosettingscluster.go b/typedapi/types/nodeinfosettingscluster.go index 33112f7f9b..f709c8fc36 100644 --- a/typedapi/types/nodeinfosettingscluster.go +++ b/typedapi/types/nodeinfosettingscluster.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeInfoSettingsCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L132-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L132-L142 type NodeInfoSettingsCluster struct { DeprecationIndexing *DeprecationIndexing `json:"deprecation_indexing,omitempty"` Election NodeInfoSettingsClusterElection `json:"election"` @@ -90,3 +90,5 @@ func NewNodeInfoSettingsCluster() *NodeInfoSettingsCluster { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsclusterelection.go b/typedapi/types/nodeinfosettingsclusterelection.go index e9f90314a6..cb27ea6f73 100644 --- a/typedapi/types/nodeinfosettingsclusterelection.go +++ b/typedapi/types/nodeinfosettingsclusterelection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeInfoSettingsClusterElection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L148-L150 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L148-L150 type NodeInfoSettingsClusterElection struct { Strategy string `json:"strategy"` } @@ -66,3 +66,5 @@ func NewNodeInfoSettingsClusterElection() *NodeInfoSettingsClusterElection { return r } + +// false diff --git a/typedapi/types/nodeinfosettingshttp.go b/typedapi/types/nodeinfosettingshttp.go index 80a838e909..10fee65c65 100644 --- a/typedapi/types/nodeinfosettingshttp.go +++ b/typedapi/types/nodeinfosettingshttp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsHttp type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L192-L197 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L192-L197 type NodeInfoSettingsHttp struct { Compression string `json:"compression,omitempty"` Port string `json:"port,omitempty"` @@ -106,3 +106,5 @@ func NewNodeInfoSettingsHttp() *NodeInfoSettingsHttp { return r } + +// false diff --git a/typedapi/types/nodeinfosettingshttptype.go b/typedapi/types/nodeinfosettingshttptype.go index a7851d5901..a04df7a23e 100644 --- a/typedapi/types/nodeinfosettingshttptype.go +++ b/typedapi/types/nodeinfosettingshttptype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsHttpType type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L199-L202 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L199-L202 type NodeInfoSettingsHttpType struct { Default string `json:"default"` } @@ -86,3 +86,5 @@ func NewNodeInfoSettingsHttpType() *NodeInfoSettingsHttpType { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsingest.go b/typedapi/types/nodeinfosettingsingest.go index 559e661e7f..470ec3875d 100644 --- a/typedapi/types/nodeinfosettingsingest.go +++ b/typedapi/types/nodeinfosettingsingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoSettingsIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L87-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L87-L122 type NodeInfoSettingsIngest struct { Append *NodeInfoIngestInfo `json:"append,omitempty"` Attachment *NodeInfoIngestInfo `json:"attachment,omitempty"` @@ -66,3 +66,5 @@ func NewNodeInfoSettingsIngest() *NodeInfoSettingsIngest { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsnetwork.go b/typedapi/types/nodeinfosettingsnetwork.go index 73f8529399..b7adaeced8 100644 --- a/typedapi/types/nodeinfosettingsnetwork.go +++ b/typedapi/types/nodeinfosettingsnetwork.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeInfoSettingsNetwork type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L223-L225 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L223-L225 type NodeInfoSettingsNetwork struct { Host []string `json:"host,omitempty"` } @@ -77,3 +77,5 @@ func NewNodeInfoSettingsNetwork() *NodeInfoSettingsNetwork { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsnode.go b/typedapi/types/nodeinfosettingsnode.go index 9f25a2442b..1127479c1c 100644 --- a/typedapi/types/nodeinfosettingsnode.go +++ b/typedapi/types/nodeinfosettingsnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L152-L156 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L152-L156 type NodeInfoSettingsNode struct { Attr map[string]json.RawMessage `json:"attr"` MaxLocalStorageNodes *string `json:"max_local_storage_nodes,omitempty"` @@ -86,8 +86,10 @@ func (s *NodeInfoSettingsNode) UnmarshalJSON(data []byte) error { // NewNodeInfoSettingsNode returns a NodeInfoSettingsNode. func NewNodeInfoSettingsNode() *NodeInfoSettingsNode { r := &NodeInfoSettingsNode{ - Attr: make(map[string]json.RawMessage, 0), + Attr: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransport.go b/typedapi/types/nodeinfosettingstransport.go index 51ca30b336..d78a7ae340 100644 --- a/typedapi/types/nodeinfosettingstransport.go +++ b/typedapi/types/nodeinfosettingstransport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransport type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L208-L212 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L208-L212 type NodeInfoSettingsTransport struct { Features *NodeInfoSettingsTransportFeatures `json:"features,omitempty"` Type NodeInfoSettingsTransportType `json:"type"` @@ -86,3 +86,5 @@ func NewNodeInfoSettingsTransport() *NodeInfoSettingsTransport { return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransportfeatures.go b/typedapi/types/nodeinfosettingstransportfeatures.go index d507a561a3..508abd0a5f 100644 --- a/typedapi/types/nodeinfosettingstransportfeatures.go +++ b/typedapi/types/nodeinfosettingstransportfeatures.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransportFeatures type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L219-L221 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L219-L221 type NodeInfoSettingsTransportFeatures struct { XPack string `json:"x-pack"` } @@ -74,3 +74,5 @@ func NewNodeInfoSettingsTransportFeatures() *NodeInfoSettingsTransportFeatures { return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransporttype.go b/typedapi/types/nodeinfosettingstransporttype.go index 045f23d251..05b6fab03b 100644 --- a/typedapi/types/nodeinfosettingstransporttype.go +++ b/typedapi/types/nodeinfosettingstransporttype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransportType type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L214-L217 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L214-L217 type NodeInfoSettingsTransportType struct { Default string `json:"default"` } @@ -86,3 +86,5 @@ func NewNodeInfoSettingsTransportType() *NodeInfoSettingsTransportType { return r } + +// false diff --git a/typedapi/types/nodeinfotransport.go b/typedapi/types/nodeinfotransport.go index 7fc6ccf5c9..6a0e2beddc 100644 --- a/typedapi/types/nodeinfotransport.go +++ b/typedapi/types/nodeinfotransport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoTransport type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L358-L362 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L358-L362 type NodeInfoTransport struct { BoundAddress []string `json:"bound_address"` Profiles map[string]string `json:"profiles"` @@ -86,8 +86,10 @@ func (s *NodeInfoTransport) UnmarshalJSON(data []byte) error { // NewNodeInfoTransport returns a NodeInfoTransport. func NewNodeInfoTransport() *NodeInfoTransport { r := &NodeInfoTransport{ - Profiles: make(map[string]string, 0), + Profiles: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpack.go b/typedapi/types/nodeinfoxpack.go index c62f921092..7c03530500 100644 --- a/typedapi/types/nodeinfoxpack.go +++ b/typedapi/types/nodeinfoxpack.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // NodeInfoXpack type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L239-L244 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L239-L244 type NodeInfoXpack struct { License *NodeInfoXpackLicense `json:"license,omitempty"` Ml *NodeInfoXpackMl `json:"ml,omitempty"` @@ -37,8 +37,10 @@ type NodeInfoXpack struct { // NewNodeInfoXpack returns a NodeInfoXpack. func NewNodeInfoXpack() *NodeInfoXpack { r := &NodeInfoXpack{ - Notification: make(map[string]json.RawMessage, 0), + Notification: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpacklicense.go b/typedapi/types/nodeinfoxpacklicense.go index b46d12680a..1c1c442551 100644 --- a/typedapi/types/nodeinfoxpacklicense.go +++ b/typedapi/types/nodeinfoxpacklicense.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoXpackLicense type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L281-L283 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L281-L283 type NodeInfoXpackLicense struct { SelfGenerated NodeInfoXpackLicenseType `json:"self_generated"` } @@ -33,3 +33,5 @@ func NewNodeInfoXpackLicense() *NodeInfoXpackLicense { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacklicensetype.go b/typedapi/types/nodeinfoxpacklicensetype.go index c322539dfe..40d3ccf0d0 100644 --- a/typedapi/types/nodeinfoxpacklicensetype.go +++ b/typedapi/types/nodeinfoxpacklicensetype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackLicenseType type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L285-L287 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L285-L287 type NodeInfoXpackLicenseType struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoXpackLicenseType() *NodeInfoXpackLicenseType { return r } + +// false diff --git a/typedapi/types/nodeinfoxpackml.go b/typedapi/types/nodeinfoxpackml.go index 16535632a6..2b68da4ec8 100644 --- a/typedapi/types/nodeinfoxpackml.go +++ b/typedapi/types/nodeinfoxpackml.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackMl type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L253-L255 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L253-L255 type NodeInfoXpackMl struct { UseAutoMachineMemoryPercent *bool `json:"use_auto_machine_memory_percent,omitempty"` } @@ -76,3 +76,5 @@ func NewNodeInfoXpackMl() *NodeInfoXpackMl { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurity.go b/typedapi/types/nodeinfoxpacksecurity.go index 708c806b6e..b5f7e1ccc0 100644 --- a/typedapi/types/nodeinfoxpacksecurity.go +++ b/typedapi/types/nodeinfoxpacksecurity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackSecurity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L246-L251 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L246-L251 type NodeInfoXpackSecurity struct { Authc *NodeInfoXpackSecurityAuthc `json:"authc,omitempty"` Enabled string `json:"enabled"` @@ -92,3 +92,5 @@ func NewNodeInfoXpackSecurity() *NodeInfoXpackSecurity { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthc.go b/typedapi/types/nodeinfoxpacksecurityauthc.go index 7522bceff2..7cc6b09141 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthc.go +++ b/typedapi/types/nodeinfoxpacksecurityauthc.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoXpackSecurityAuthc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L261-L264 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L261-L264 type NodeInfoXpackSecurityAuthc struct { Realms *NodeInfoXpackSecurityAuthcRealms `json:"realms,omitempty"` Token *NodeInfoXpackSecurityAuthcToken `json:"token,omitempty"` @@ -34,3 +34,5 @@ func NewNodeInfoXpackSecurityAuthc() *NodeInfoXpackSecurityAuthc { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthcrealms.go b/typedapi/types/nodeinfoxpacksecurityauthcrealms.go index 632e326903..029dfa15bf 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthcrealms.go +++ b/typedapi/types/nodeinfoxpacksecurityauthcrealms.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoXpackSecurityAuthcRealms type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L266-L270 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L266-L270 type NodeInfoXpackSecurityAuthcRealms struct { File map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"file,omitempty"` Native map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"native,omitempty"` @@ -32,10 +32,12 @@ type NodeInfoXpackSecurityAuthcRealms struct { // NewNodeInfoXpackSecurityAuthcRealms returns a NodeInfoXpackSecurityAuthcRealms. 
func NewNodeInfoXpackSecurityAuthcRealms() *NodeInfoXpackSecurityAuthcRealms { r := &NodeInfoXpackSecurityAuthcRealms{ - File: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), - Native: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), - Pki: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), + File: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Native: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Pki: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go b/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go index c4710075d3..05c38fb99a 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go +++ b/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackSecurityAuthcRealmsStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L276-L279 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L276-L279 type NodeInfoXpackSecurityAuthcRealmsStatus struct { Enabled *string `json:"enabled,omitempty"` Order string `json:"order"` @@ -87,3 +87,5 @@ func NewNodeInfoXpackSecurityAuthcRealmsStatus() *NodeInfoXpackSecurityAuthcReal return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthctoken.go b/typedapi/types/nodeinfoxpacksecurityauthctoken.go index 0a5f0bbf26..76d264795d 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthctoken.go +++ b/typedapi/types/nodeinfoxpacksecurityauthctoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackSecurityAuthcToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L272-L274 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L272-L274 type NodeInfoXpackSecurityAuthcToken struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewNodeInfoXpackSecurityAuthcToken() *NodeInfoXpackSecurityAuthcToken { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityssl.go b/typedapi/types/nodeinfoxpacksecurityssl.go index e8d9ef9291..f3c050e3ad 100644 --- a/typedapi/types/nodeinfoxpacksecurityssl.go +++ b/typedapi/types/nodeinfoxpacksecurityssl.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodeInfoXpackSecuritySsl type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L257-L259 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L257-L259 type NodeInfoXpackSecuritySsl struct { Ssl map[string]string `json:"ssl"` } @@ -30,8 +30,10 @@ type NodeInfoXpackSecuritySsl struct { // NewNodeInfoXpackSecuritySsl returns a NodeInfoXpackSecuritySsl. func NewNodeInfoXpackSecuritySsl() *NodeInfoXpackSecuritySsl { r := &NodeInfoXpackSecuritySsl{ - Ssl: make(map[string]string, 0), + Ssl: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodejvminfo.go b/typedapi/types/nodejvminfo.go index 86d9e0b214..cdf42522e2 100644 --- a/typedapi/types/nodejvminfo.go +++ b/typedapi/types/nodejvminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeJvmInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L364-L378 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L364-L378 type NodeJvmInfo struct { GcCollectors []string `json:"gc_collectors"` InputArguments []string `json:"input_arguments"` @@ -167,3 +167,5 @@ func NewNodeJvmInfo() *NodeJvmInfo { return r } + +// false diff --git a/typedapi/types/nodeoperatingsysteminfo.go b/typedapi/types/nodeoperatingsysteminfo.go index 258a44c3c7..f32b8d313f 100644 --- a/typedapi/types/nodeoperatingsysteminfo.go +++ b/typedapi/types/nodeoperatingsysteminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeOperatingSystemInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L380-L397 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L380-L397 type NodeOperatingSystemInfo struct { // AllocatedProcessors The number of processors actually used to calculate thread pool size. This // number can be set with the node.processors setting of a node and defaults to @@ -158,3 +158,5 @@ func NewNodeOperatingSystemInfo() *NodeOperatingSystemInfo { return r } + +// false diff --git a/typedapi/types/nodepackagingtype.go b/typedapi/types/nodepackagingtype.go index f5635f3317..4ac843c2ab 100644 --- a/typedapi/types/nodepackagingtype.go +++ b/typedapi/types/nodepackagingtype.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodePackagingType type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L526-L539 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L526-L539 type NodePackagingType struct { // Count Number of selected nodes using the distribution flavor and file type. Count int `json:"count"` @@ -107,3 +107,5 @@ func NewNodePackagingType() *NodePackagingType { return r } + +// false diff --git a/typedapi/types/nodeprocessinfo.go b/typedapi/types/nodeprocessinfo.go index db9db14465..0616b3b544 100644 --- a/typedapi/types/nodeprocessinfo.go +++ b/typedapi/types/nodeprocessinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeProcessInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L399-L406 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L399-L406 type NodeProcessInfo struct { // Id Process identifier (PID) Id int64 `json:"id"` @@ -101,3 +101,5 @@ func NewNodeProcessInfo() *NodeProcessInfo { return r } + +// false diff --git a/typedapi/types/nodereloadresult.go b/typedapi/types/nodereloadresult.go index 534800f6c4..f94da08837 100644 --- a/typedapi/types/nodereloadresult.go +++ b/typedapi/types/nodereloadresult.go @@ -16,14 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types -// NodeReloadResult holds the union for the following types: -// -// Stats -// NodeReloadError +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeReloadResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/NodeReloadResult.ts#L29-L30 -type NodeReloadResult any +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/NodeReloadResult.ts#L23-L26 +type NodeReloadResult struct { + Name string `json:"name"` + ReloadException *ErrorCause `json:"reload_exception,omitempty"` +} + +func (s *NodeReloadResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "reload_exception": + if err := dec.Decode(&s.ReloadException); err != nil { + return fmt.Errorf("%s | %w", "ReloadException", err) + } + + } + } + return nil +} + +// NewNodeReloadResult returns a NodeReloadResult. +func NewNodeReloadResult() *NodeReloadResult { + r := &NodeReloadResult{} + + return r +} + +// false diff --git a/typedapi/types/nodescontext.go b/typedapi/types/nodescontext.go index 24a888964f..b81a993b47 100644 --- a/typedapi/types/nodescontext.go +++ b/typedapi/types/nodescontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodesContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1068-L1073 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1068-L1073 type NodesContext struct { CacheEvictions *int64 `json:"cache_evictions,omitempty"` CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` @@ -122,3 +122,5 @@ func NewNodesContext() *NodesContext { return r } + +// false diff --git a/typedapi/types/nodescredentials.go b/typedapi/types/nodescredentials.go index 80e1acb511..df930c2ef1 100644 --- a/typedapi/types/nodescredentials.go +++ b/typedapi/types/nodescredentials.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodesCredentials type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_service_credentials/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_service_credentials/types.ts#L23-L28 type NodesCredentials struct { // FileTokens File-backed tokens collected from all nodes FileTokens map[string]NodesCredentialsFileToken `json:"file_tokens"` @@ -33,8 +33,10 @@ type NodesCredentials struct { // NewNodesCredentials returns a NodesCredentials. 
func NewNodesCredentials() *NodesCredentials { r := &NodesCredentials{ - FileTokens: make(map[string]NodesCredentialsFileToken, 0), + FileTokens: make(map[string]NodesCredentialsFileToken), } return r } + +// false diff --git a/typedapi/types/nodescredentialsfiletoken.go b/typedapi/types/nodescredentialsfiletoken.go index 17988361c0..8534eefa02 100644 --- a/typedapi/types/nodescredentialsfiletoken.go +++ b/typedapi/types/nodescredentialsfiletoken.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodesCredentialsFileToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_service_credentials/types.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_service_credentials/types.ts#L30-L32 type NodesCredentialsFileToken struct { Nodes []string `json:"nodes"` } @@ -33,3 +33,5 @@ func NewNodesCredentialsFileToken() *NodesCredentialsFileToken { return r } + +// false diff --git a/typedapi/types/nodeshard.go b/typedapi/types/nodeshard.go index fbb4060d72..f2dad55696 100644 --- a/typedapi/types/nodeshard.go +++ b/typedapi/types/nodeshard.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NodeShard type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Node.ts#L54-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Node.ts#L54-L65 type NodeShard struct { AllocationId map[string]string `json:"allocation_id,omitempty"` Index string `json:"index"` @@ -146,9 +146,11 @@ func (s *NodeShard) UnmarshalJSON(data []byte) error { // NewNodeShard returns a NodeShard. func NewNodeShard() *NodeShard { r := &NodeShard{ - AllocationId: make(map[string]string, 0), - RecoverySource: make(map[string]string, 0), + AllocationId: make(map[string]string), + RecoverySource: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeshutdownstatus.go b/typedapi/types/nodeshutdownstatus.go index 756fa5822e..b10d69eea9 100644 --- a/typedapi/types/nodeshutdownstatus.go +++ b/typedapi/types/nodeshutdownstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // NodeShutdownStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 type NodeShutdownStatus struct { NodeId string `json:"node_id"` PersistentTasks PersistentTaskStatus `json:"persistent_tasks"` @@ -119,3 +119,5 @@ func NewNodeShutdownStatus() *NodeShutdownStatus { return r } + +// false diff --git a/typedapi/types/nodesindexingpressure.go b/typedapi/types/nodesindexingpressure.go index c70660fd49..3cf2911656 100644 --- a/typedapi/types/nodesindexingpressure.go +++ b/typedapi/types/nodesindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodesIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L116-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L116-L121 type NodesIndexingPressure struct { // Memory Contains statistics for memory consumption from indexing load. 
Memory *NodesIndexingPressureMemory `json:"memory,omitempty"` @@ -34,3 +34,5 @@ func NewNodesIndexingPressure() *NodesIndexingPressure { return r } + +// false diff --git a/typedapi/types/nodesindexingpressurememory.go b/typedapi/types/nodesindexingpressurememory.go index 0515eef7be..ae40093e2f 100644 --- a/typedapi/types/nodesindexingpressurememory.go +++ b/typedapi/types/nodesindexingpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodesIndexingPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L123-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L123-L142 type NodesIndexingPressureMemory struct { // Current Contains statistics for current indexing load. Current *PressureMemory `json:"current,omitempty"` @@ -101,3 +101,5 @@ func NewNodesIndexingPressureMemory() *NodesIndexingPressureMemory { return r } + +// false diff --git a/typedapi/types/nodesingest.go b/typedapi/types/nodesingest.go index 541bd6f382..519cb244eb 100644 --- a/typedapi/types/nodesingest.go +++ b/typedapi/types/nodesingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // NodesIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L345-L354 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L345-L354 type NodesIngest struct { // Pipelines Contains statistics about ingest pipelines for the node. Pipelines map[string]IngestStats `json:"pipelines,omitempty"` @@ -33,8 +33,10 @@ type NodesIngest struct { // NewNodesIngest returns a NodesIngest. func NewNodesIngest() *NodesIngest { r := &NodesIngest{ - Pipelines: make(map[string]IngestStats, 0), + Pipelines: make(map[string]IngestStats), } return r } + +// false diff --git a/typedapi/types/nodesrecord.go b/typedapi/types/nodesrecord.go index 930f250f0a..0dde49176e 100644 --- a/typedapi/types/nodesrecord.go +++ b/typedapi/types/nodesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/nodes/types.ts#L23-L542 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/nodes/types.ts#L23-L542 type NodesRecord struct { // Build The Elasticsearch build hash. Build *string `json:"build,omitempty"` @@ -1342,3 +1342,5 @@ func NewNodesRecord() *NodesRecord { return r } + +// false diff --git a/typedapi/types/nodestatistics.go b/typedapi/types/nodestatistics.go index 07a8fc7fc7..2cf8cad471 100644 --- a/typedapi/types/nodestatistics.go +++ b/typedapi/types/nodestatistics.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Node.ts#L28-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Node.ts#L28-L39 type NodeStatistics struct { // Failed Number of nodes that rejected the request or failed to respond. If this value // is not 0, a reason for the rejection or failure is included in the response. @@ -122,3 +122,5 @@ func NewNodeStatistics() *NodeStatistics { return r } + +// false diff --git a/typedapi/types/nodetasks.go b/typedapi/types/nodetasks.go index a45576778c..12363ab0bd 100644 --- a/typedapi/types/nodetasks.go +++ b/typedapi/types/nodetasks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeTasks type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 type NodeTasks struct { Attributes map[string]string `json:"attributes,omitempty"` Host *string `json:"host,omitempty"` @@ -105,9 +105,11 @@ func (s *NodeTasks) UnmarshalJSON(data []byte) error { // NewNodeTasks returns a NodeTasks. func NewNodeTasks() *NodeTasks { r := &NodeTasks{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]TaskInfo, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), } return r } + +// false diff --git a/typedapi/types/nodethreadpoolinfo.go b/typedapi/types/nodethreadpoolinfo.go index 5f16d45cf7..d09087aea6 100644 --- a/typedapi/types/nodethreadpoolinfo.go +++ b/typedapi/types/nodethreadpoolinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NodeThreadPoolInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/info/types.ts#L302-L309 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/info/types.ts#L302-L309 type NodeThreadPoolInfo struct { Core *int `json:"core,omitempty"` KeepAlive Duration `json:"keep_alive,omitempty"` @@ -148,3 +148,5 @@ func NewNodeThreadPoolInfo() *NodeThreadPoolInfo { return r } + +// false diff --git a/typedapi/types/nodeusage.go b/typedapi/types/nodeusage.go index 325c711f5d..7a016d5ef3 100644 --- a/typedapi/types/nodeusage.go +++ b/typedapi/types/nodeusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NodeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/usage/types.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/usage/types.ts#L25-L30 type NodeUsage struct { Aggregations map[string]json.RawMessage `json:"aggregations"` RestActions map[string]int `json:"rest_actions"` @@ -87,9 +87,11 @@ func (s *NodeUsage) UnmarshalJSON(data []byte) error { // NewNodeUsage returns a NodeUsage. 
func NewNodeUsage() *NodeUsage { r := &NodeUsage{ - Aggregations: make(map[string]json.RawMessage, 0), - RestActions: make(map[string]int, 0), + Aggregations: make(map[string]json.RawMessage), + RestActions: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/norianalyzer.go b/typedapi/types/norianalyzer.go index 5b62e92699..4f8ea4f448 100644 --- a/typedapi/types/norianalyzer.go +++ b/typedapi/types/norianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NoriAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L312-L318 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L323-L330 type NoriAnalyzer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` Stoptags []string `json:"stoptags,omitempty"` @@ -116,3 +116,13 @@ func NewNoriAnalyzer() *NoriAnalyzer { return r } + +// true + +type NoriAnalyzerVariant interface { + NoriAnalyzerCaster() *NoriAnalyzer +} + +func (s *NoriAnalyzer) NoriAnalyzerCaster() *NoriAnalyzer { + return s +} diff --git a/typedapi/types/noripartofspeechtokenfilter.go b/typedapi/types/noripartofspeechtokenfilter.go index cebd70a76a..7f4d67a439 100644 --- a/typedapi/types/noripartofspeechtokenfilter.go +++ b/typedapi/types/noripartofspeechtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // NoriPartOfSpeechTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L274-L277 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L274-L277 type NoriPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewNoriPartOfSpeechTokenFilter() *NoriPartOfSpeechTokenFilter { return r } + +// true + +type NoriPartOfSpeechTokenFilterVariant interface { + NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter +} + +func (s *NoriPartOfSpeechTokenFilter) NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter { + return s +} diff --git a/typedapi/types/noritokenizer.go b/typedapi/types/noritokenizer.go index 2c2f49d318..1fc7c5e230 100644 --- a/typedapi/types/noritokenizer.go +++ b/typedapi/types/noritokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NoriTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/nori-plugin.ts#L28-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/nori-plugin.ts#L28-L34 type NoriTokenizer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -132,3 +132,13 @@ func NewNoriTokenizer() *NoriTokenizer { return r } + +// true + +type NoriTokenizerVariant interface { + NoriTokenizerCaster() *NoriTokenizer +} + +func (s *NoriTokenizer) NoriTokenizerCaster() *NoriTokenizer { + return s +} diff --git a/typedapi/types/normalizeaggregation.go b/typedapi/types/normalizeaggregation.go index a09d5646e1..59d3b47c31 100644 --- a/typedapi/types/normalizeaggregation.go +++ b/typedapi/types/normalizeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // NormalizeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L351-L359 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L351-L359 type NormalizeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -101,3 +101,13 @@ func NewNormalizeAggregation() *NormalizeAggregation { return r } + +// true + +type NormalizeAggregationVariant interface { + NormalizeAggregationCaster() *NormalizeAggregation +} + +func (s *NormalizeAggregation) NormalizeAggregationCaster() *NormalizeAggregation { + return s +} diff --git a/typedapi/types/normalizer.go b/typedapi/types/normalizer.go index 0c2979acde..6f0ef6eb0f 100644 --- a/typedapi/types/normalizer.go +++ b/typedapi/types/normalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // LowercaseNormalizer // CustomNormalizer // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/normalizers.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/normalizers.ts#L20-L24 type Normalizer any + +type NormalizerVariant interface { + NormalizerCaster() *Normalizer +} diff --git a/typedapi/types/norwegiananalyzer.go b/typedapi/types/norwegiananalyzer.go index 27f4521f92..1528f2a3c6 100644 --- a/typedapi/types/norwegiananalyzer.go +++ b/typedapi/types/norwegiananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // NorwegianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L237-L242 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L248-L253 type NorwegianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewNorwegianAnalyzer() *NorwegianAnalyzer { return r } + +// true + +type NorwegianAnalyzerVariant interface { + NorwegianAnalyzerCaster() *NorwegianAnalyzer +} + +func (s *NorwegianAnalyzer) NorwegianAnalyzerCaster() *NorwegianAnalyzer { + return s +} diff --git a/typedapi/types/numberrangequery.go b/typedapi/types/numberrangequery.go index 29184a3a99..c7bf1aa06f 100644 --- a/typedapi/types/numberrangequery.go +++ b/typedapi/types/numberrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // NumberRangeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L172-L172 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L172-L172 type NumberRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -189,3 +189,13 @@ func NewNumberRangeQuery() *NumberRangeQuery { return r } + +// true + +type NumberRangeQueryVariant interface { + NumberRangeQueryCaster() *NumberRangeQuery +} + +func (s *NumberRangeQuery) NumberRangeQueryCaster() *NumberRangeQuery { + return s +} diff --git a/typedapi/types/numericdecayfunction.go b/typedapi/types/numericdecayfunction.go index e3a9eda667..add1d77195 100644 --- a/typedapi/types/numericdecayfunction.go +++ b/typedapi/types/numericdecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // NumericDecayFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L208-L208 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L208-L208 type NumericDecayFunction struct { DecayFunctionBasedoubledouble map[string]DecayPlacementdoubledouble `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s NumericDecayFunction) MarshalJSON() ([]byte, error) { // NewNumericDecayFunction returns a NumericDecayFunction. 
func NewNumericDecayFunction() *NumericDecayFunction { r := &NumericDecayFunction{ - DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble, 0), + DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble), } return r } + +// true + +type NumericDecayFunctionVariant interface { + NumericDecayFunctionCaster() *NumericDecayFunction +} + +func (s *NumericDecayFunction) NumericDecayFunctionCaster() *NumericDecayFunction { + return s +} diff --git a/typedapi/types/numericfielddata.go b/typedapi/types/numericfielddata.go index 991e538fcb..1efcbf8284 100644 --- a/typedapi/types/numericfielddata.go +++ b/typedapi/types/numericfielddata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // NumericFielddata type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/NumericFielddata.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/NumericFielddata.ts#L22-L24 type NumericFielddata struct { Format numericfielddataformat.NumericFielddataFormat `json:"format"` } @@ -37,3 +37,13 @@ func NewNumericFielddata() *NumericFielddata { return r } + +// true + +type NumericFielddataVariant interface { + NumericFielddataCaster() *NumericFielddata +} + +func (s *NumericFielddata) NumericFielddataCaster() *NumericFielddata { + return s +} diff --git a/typedapi/types/objectproperty.go b/typedapi/types/objectproperty.go index 54256e4d82..5ef0ae97e7 100644 --- a/typedapi/types/objectproperty.go +++ b/typedapi/types/objectproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ObjectProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/complex.ts#L45-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/complex.ts#L46-L50 type ObjectProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -41,11 +43,12 @@ type ObjectProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Subobjects *bool `json:"subobjects,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *ObjectProperty) UnmarshalJSON(data []byte) error { @@ -117,301 +120,313 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -460,301 +475,313 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo 
:= NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := 
NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo 
:= NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -775,17 +802,13 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { } case "subobjects": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Subobjects", err) - } - s.Subobjects = &value - case bool: - s.Subobjects = &v + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) } case "type": @@ -802,16 +825,17 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { func (s ObjectProperty) MarshalJSON() ([]byte, error) { type innerObjectProperty ObjectProperty tmp := innerObjectProperty{ - CopyTo: s.CopyTo, - Dynamic: s.Dynamic, - Enabled: s.Enabled, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Subobjects: s.Subobjects, - Type: s.Type, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + Subobjects: s.Subobjects, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = 
"object" @@ -822,10 +846,20 @@ func (s ObjectProperty) MarshalJSON() ([]byte, error) { // NewObjectProperty returns a ObjectProperty. func NewObjectProperty() *ObjectProperty { r := &ObjectProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ObjectPropertyVariant interface { + ObjectPropertyCaster() *ObjectProperty +} + +func (s *ObjectProperty) ObjectPropertyCaster() *ObjectProperty { + return s +} diff --git a/typedapi/types/onehotencodingpreprocessor.go b/typedapi/types/onehotencodingpreprocessor.go index 290e8809ff..b98a0058a6 100644 --- a/typedapi/types/onehotencodingpreprocessor.go +++ b/typedapi/types/onehotencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // OneHotEncodingPreprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L44-L47 type OneHotEncodingPreprocessor struct { Field string `json:"field"` HotMap map[string]string `json:"hot_map"` @@ -80,8 +80,18 @@ func (s *OneHotEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewOneHotEncodingPreprocessor returns a OneHotEncodingPreprocessor. 
func NewOneHotEncodingPreprocessor() *OneHotEncodingPreprocessor { r := &OneHotEncodingPreprocessor{ - HotMap: make(map[string]string, 0), + HotMap: make(map[string]string), } return r } + +// true + +type OneHotEncodingPreprocessorVariant interface { + OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor +} + +func (s *OneHotEncodingPreprocessor) OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor { + return s +} diff --git a/typedapi/types/openaiservicesettings.go b/typedapi/types/openaiservicesettings.go new file mode 100644 index 0000000000..6acf52fcd5 --- /dev/null +++ b/typedapi/types/openaiservicesettings.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAIServiceSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_openai/PutOpenAiRequest.ts#L94-L136 +type OpenAIServiceSettings struct { + // ApiKey A valid API key of your OpenAI account. + // You can find your OpenAI API keys in your OpenAI account under the API keys + // section. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // It is supported only in `text-embedding-3` and later models. + // If it is not set, the OpenAI defined default for the model is used. + Dimensions *int `json:"dimensions,omitempty"` + // ModelId The name of the model to use for the inference task. + // Refer to the OpenAI documentation for the list of available text embedding + // models. + ModelId string `json:"model_id"` + // OrganizationId The unique identifier for your organization. + // You can find the Organization ID in your OpenAI account under *Settings > + // Organizations*. + OrganizationId *string `json:"organization_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // OpenAI. + // The `openai` service sets a default number of requests allowed per minute + // depending on the task type. + // For `text_embedding`, it is set to `3000`. + // For `completion`, it is set to `500`. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL endpoint to use for the requests. + // It can be changed for testing purposes. 
+ Url *string `json:"url,omitempty"` +} + +func (s *OpenAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "dimensions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dimensions", err) + } + s.Dimensions = &value + case float64: + f := int(v) + s.Dimensions = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "organization_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OrganizationId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OrganizationId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewOpenAIServiceSettings returns a OpenAIServiceSettings. 
+func NewOpenAIServiceSettings() *OpenAIServiceSettings { + r := &OpenAIServiceSettings{} + + return r +} + +// true + +type OpenAIServiceSettingsVariant interface { + OpenAIServiceSettingsCaster() *OpenAIServiceSettings +} + +func (s *OpenAIServiceSettings) OpenAIServiceSettingsCaster() *OpenAIServiceSettings { + return s +} diff --git a/typedapi/types/openaitasksettings.go b/typedapi/types/openaitasksettings.go new file mode 100644 index 0000000000..8f50504c27 --- /dev/null +++ b/typedapi/types/openaitasksettings.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_openai/PutOpenAiRequest.ts#L138-L144 +type OpenAITaskSettings struct { + // User For a `completion` or `text_embedding` task, specify the user issuing the + // request. 
+ // This information can be used for abuse detection. + User *string `json:"user,omitempty"` +} + +func (s *OpenAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "user": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.User = &o + + } + } + return nil +} + +// NewOpenAITaskSettings returns a OpenAITaskSettings. +func NewOpenAITaskSettings() *OpenAITaskSettings { + r := &OpenAITaskSettings{} + + return r +} + +// true + +type OpenAITaskSettingsVariant interface { + OpenAITaskSettingsCaster() *OpenAITaskSettings +} + +func (s *OpenAITaskSettings) OpenAITaskSettingsCaster() *OpenAITaskSettings { + return s +} diff --git a/typedapi/types/operatingsystem.go b/typedapi/types/operatingsystem.go index 332e92c740..d3615a4e86 100644 --- a/typedapi/types/operatingsystem.go +++ b/typedapi/types/operatingsystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // OperatingSystem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1016-L1022 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1016-L1022 type OperatingSystem struct { Cgroup *Cgroup `json:"cgroup,omitempty"` Cpu *Cpu `json:"cpu,omitempty"` @@ -101,3 +101,5 @@ func NewOperatingSystem() *OperatingSystem { return r } + +// false diff --git a/typedapi/types/operatingsystemmemoryinfo.go b/typedapi/types/operatingsystemmemoryinfo.go index 47ce153f4f..09a2602459 100644 --- a/typedapi/types/operatingsystemmemoryinfo.go +++ b/typedapi/types/operatingsystemmemoryinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // OperatingSystemMemoryInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/stats/types.ts#L541-L568 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/stats/types.ts#L541-L568 type OperatingSystemMemoryInfo struct { // AdjustedTotalInBytes Total amount, in bytes, of memory across all selected nodes, but using the // value specified using the `es.total_memory_bytes` system property instead of @@ -167,3 +167,5 @@ func NewOperatingSystemMemoryInfo() *OperatingSystemMemoryInfo { return r } + +// false diff --git a/typedapi/types/operationcontainer.go b/typedapi/types/operationcontainer.go index df4a199c60..f10c781240 100644 --- a/typedapi/types/operationcontainer.go +++ b/typedapi/types/operationcontainer.go @@ -16,31 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // OperationContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L145-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L158-L180 type OperationContainer struct { - // Create Indexes the specified document if it does not already exist. + AdditionalOperationContainerProperty map[string]json.RawMessage `json:"-"` + // Create Index the specified document if it does not already exist. // The following line must contain the source data to be indexed. Create *CreateOperation `json:"create,omitempty"` - // Delete Removes the specified document from the index. 
+ // Delete Remove the specified document from the index. Delete *DeleteOperation `json:"delete,omitempty"` - // Index Indexes the specified document. - // If the document exists, replaces the document and increments the version. + // Index Index the specified document. + // If the document exists, it replaces the document and increments the version. // The following line must contain the source data to be indexed. Index *IndexOperation `json:"index,omitempty"` - // Update Performs a partial document update. + // Update Perform a partial document update. // The following line must contain the partial document and update options. Update *UpdateOperation `json:"update,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s OperationContainer) MarshalJSON() ([]byte, error) { + type opt OperationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalOperationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalOperationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewOperationContainer returns a OperationContainer. 
func NewOperationContainer() *OperationContainer { - r := &OperationContainer{} + r := &OperationContainer{ + AdditionalOperationContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type OperationContainerVariant interface { + OperationContainerCaster() *OperationContainer +} + +func (s *OperationContainer) OperationContainerCaster() *OperationContainer { + return s +} diff --git a/typedapi/types/outlierdetectionparameters.go b/typedapi/types/outlierdetectionparameters.go index 33beae6913..23586bd3f1 100644 --- a/typedapi/types/outlierdetectionparameters.go +++ b/typedapi/types/outlierdetectionparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // OutlierDetectionParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L526-L560 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L528-L562 type OutlierDetectionParameters struct { // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` @@ -178,3 +178,5 @@ func NewOutlierDetectionParameters() *OutlierDetectionParameters { return r } + +// false diff --git a/typedapi/types/overallbucket.go b/typedapi/types/overallbucket.go index 1bac27c184..3c61e57750 100644 --- a/typedapi/types/overallbucket.go +++ b/typedapi/types/overallbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // OverallBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Bucket.ts#L129-L144 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Bucket.ts#L129-L144 type OverallBucket struct { // BucketSpan The length of the bucket in seconds. Matches the job with the longest // bucket_span value. @@ -48,7 +48,7 @@ type OverallBucket struct { // Timestamp The start time of the bucket for which these results were calculated. Timestamp int64 `json:"timestamp"` // TimestampString The start time of the bucket for which these results were calculated. - TimestampString DateTime `json:"timestamp_string"` + TimestampString DateTime `json:"timestamp_string,omitempty"` } func (s *OverallBucket) UnmarshalJSON(data []byte) error { @@ -139,3 +139,5 @@ func NewOverallBucket() *OverallBucket { return r } + +// false diff --git a/typedapi/types/overallbucketjob.go b/typedapi/types/overallbucketjob.go index c7260c5880..3eac9f29a5 100644 --- a/typedapi/types/overallbucketjob.go +++ b/typedapi/types/overallbucketjob.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // OverallBucketJob type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Bucket.ts#L145-L148 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Bucket.ts#L145-L148 type OverallBucketJob struct { JobId string `json:"job_id"` MaxAnomalyScore Float64 `json:"max_anomaly_score"` @@ -84,3 +84,5 @@ func NewOverallBucketJob() *OverallBucketJob { return r } + +// false diff --git a/typedapi/types/overlapping.go b/typedapi/types/overlapping.go index db1f2795af..d4b996e7ec 100644 --- a/typedapi/types/overlapping.go +++ b/typedapi/types/overlapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Overlapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 type Overlapping struct { IndexPatterns []string `json:"index_patterns"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewOverlapping() *Overlapping { return r } + +// false diff --git a/typedapi/types/page.go b/typedapi/types/page.go index c28c629bdd..5c2db7bf4d 100644 --- a/typedapi/types/page.go +++ b/typedapi/types/page.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Page type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Page.ts#L22-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Page.ts#L22-L33 type Page struct { // From Skips the specified number of items. From *int `json:"from,omitempty"` @@ -97,3 +97,13 @@ func NewPage() *Page { return r } + +// true + +type PageVariant interface { + PageCaster() *Page +} + +func (s *Page) PageCaster() *Page { + return s +} diff --git a/typedapi/types/pagerdutyaction.go b/typedapi/types/pagerdutyaction.go index 3d026426a1..8f5a5888d8 100644 --- a/typedapi/types/pagerdutyaction.go +++ b/typedapi/types/pagerdutyaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // PagerDutyAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L54-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L54-L54 type PagerDutyAction struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -161,3 +161,13 @@ func NewPagerDutyAction() *PagerDutyAction { return r } + +// true + +type PagerDutyActionVariant interface { + PagerDutyActionCaster() *PagerDutyAction +} + +func (s *PagerDutyAction) PagerDutyActionCaster() *PagerDutyAction { + return s +} diff --git a/typedapi/types/pagerdutycontext.go b/typedapi/types/pagerdutycontext.go index 03035a0a45..d27ccddf61 100644 --- a/typedapi/types/pagerdutycontext.go +++ b/typedapi/types/pagerdutycontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // PagerDutyContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L61-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L61-L65 type PagerDutyContext struct { Href *string `json:"href,omitempty"` Src *string `json:"src,omitempty"` @@ -95,3 +95,13 @@ func NewPagerDutyContext() *PagerDutyContext { return r } + +// true + +type PagerDutyContextVariant interface { + PagerDutyContextCaster() *PagerDutyContext +} + +func (s *PagerDutyContext) PagerDutyContextCaster() *PagerDutyContext { + return s +} diff --git a/typedapi/types/pagerdutyevent.go b/typedapi/types/pagerdutyevent.go index 7f30b73aa8..9c3702c851 100644 --- a/typedapi/types/pagerdutyevent.go +++ b/typedapi/types/pagerdutyevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // PagerDutyEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L40-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L40-L52 type PagerDutyEvent struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -161,3 +161,5 @@ func NewPagerDutyEvent() *PagerDutyEvent { return r } + +// false diff --git a/typedapi/types/pagerdutyeventproxy.go b/typedapi/types/pagerdutyeventproxy.go index a9b333fc15..3fb418251e 100644 --- a/typedapi/types/pagerdutyeventproxy.go +++ b/typedapi/types/pagerdutyeventproxy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PagerDutyEventProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L56-L59 type PagerDutyEventProxy struct { Host *string `json:"host,omitempty"` Port *int `json:"port,omitempty"` @@ -84,3 +84,13 @@ func NewPagerDutyEventProxy() *PagerDutyEventProxy { return r } + +// true + +type PagerDutyEventProxyVariant interface { + PagerDutyEventProxyCaster() *PagerDutyEventProxy +} + +func (s *PagerDutyEventProxy) PagerDutyEventProxyCaster() *PagerDutyEventProxy { + return s +} diff --git a/typedapi/types/pagerdutyresult.go b/typedapi/types/pagerdutyresult.go index 606019fefc..9ce6e94066 100644 --- a/typedapi/types/pagerdutyresult.go +++ b/typedapi/types/pagerdutyresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PagerDutyResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L78-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L78-L83 type PagerDutyResult struct { Event PagerDutyEvent `json:"event"` Reason *string `json:"reason,omitempty"` @@ -92,3 +92,5 @@ func NewPagerDutyResult() *PagerDutyResult { return r } + +// false diff --git a/typedapi/types/painlesscontextsetup.go b/typedapi/types/painlesscontextsetup.go index 2f3ae4c9dc..aab2a97599 100644 --- a/typedapi/types/painlesscontextsetup.go +++ b/typedapi/types/painlesscontextsetup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,13 +30,22 @@ import ( // PainlessContextSetup type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/scripts_painless_execute/types.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/scripts_painless_execute/types.ts#L27-L46 type PainlessContextSetup struct { - // Document Document that’s temporarily indexed in-memory and accessible from the script. + // Document Document that's temporarily indexed in-memory and accessible from the script. Document json.RawMessage `json:"document,omitempty"` - // Index Index containing a mapping that’s compatible with the indexed document. + // Index Index containing a mapping that's compatible with the indexed document. // You may specify a remote index by prefixing the index with the remote cluster // alias. 
+ // For example, `remote1:my_index` indicates that you want to run the painless + // script against the "my_index" index on the "remote1" cluster. + // This request will be forwarded to the "remote1" cluster if you have + // configured a connection to that remote cluster. + // + // NOTE: Wildcards are not accepted in the index expression for this endpoint. + // The expression `*:myindex` will return the error "No such remote cluster" and + // the expression `logs*` or `remote1:logs*` will return the error "index not + // found". Index string `json:"index"` // Query Use this parameter to specify a query for computing a score. Query *Query `json:"query,omitempty"` @@ -83,3 +92,13 @@ func NewPainlessContextSetup() *PainlessContextSetup { return r } + +// true + +type PainlessContextSetupVariant interface { + PainlessContextSetupCaster() *PainlessContextSetup +} + +func (s *PainlessContextSetup) PainlessContextSetupCaster() *PainlessContextSetup { + return s +} diff --git a/typedapi/types/parentaggregate.go b/typedapi/types/parentaggregate.go index 1a6a62ce1b..0d487ae474 100644 --- a/typedapi/types/parentaggregate.go +++ b/typedapi/types/parentaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ParentAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L894-L898 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L894-L898 type ParentAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s ParentAggregate) MarshalJSON() ([]byte, error) { // NewParentAggregate returns a ParentAggregate. func NewParentAggregate() *ParentAggregate { r := &ParentAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/parentaggregation.go b/typedapi/types/parentaggregation.go index b1a3122a59..f525812c43 100644 --- a/typedapi/types/parentaggregation.go +++ b/typedapi/types/parentaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ParentAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L662-L667 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L662-L667 type ParentAggregation struct { // Type The child type that should be selected. 
Type *string `json:"type,omitempty"` @@ -67,3 +67,13 @@ func NewParentAggregation() *ParentAggregation { return r } + +// true + +type ParentAggregationVariant interface { + ParentAggregationCaster() *ParentAggregation +} + +func (s *ParentAggregation) ParentAggregationCaster() *ParentAggregation { + return s +} diff --git a/typedapi/types/parentidquery.go b/typedapi/types/parentidquery.go index ae762b002a..be6c012c9d 100644 --- a/typedapi/types/parentidquery.go +++ b/typedapi/types/parentidquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ParentIdQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/joining.ts#L141-L158 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/joining.ts#L141-L158 type ParentIdQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -127,3 +127,13 @@ func NewParentIdQuery() *ParentIdQuery { return r } + +// true + +type ParentIdQueryVariant interface { + ParentIdQueryCaster() *ParentIdQuery +} + +func (s *ParentIdQuery) ParentIdQueryCaster() *ParentIdQuery { + return s +} diff --git a/typedapi/types/parenttaskinfo.go b/typedapi/types/parenttaskinfo.go index b4a51cae8c..016586c12a 100644 --- a/typedapi/types/parenttaskinfo.go +++ b/typedapi/types/parenttaskinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,22 @@ import ( // ParentTaskInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 type ParentTaskInfo struct { - Action string `json:"action"` - Cancellable bool `json:"cancellable"` - Cancelled *bool `json:"cancelled,omitempty"` - Children []TaskInfo `json:"children,omitempty"` + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + Children []TaskInfo `json:"children,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. + // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. + // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. Description *string `json:"description,omitempty"` Headers map[string]string `json:"headers"` Id int64 `json:"id"` @@ -45,7 +55,13 @@ type ParentTaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - // Status Task status information can vary wildly from task to task. 
+ // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. Status json.RawMessage `json:"status,omitempty"` Type string `json:"type"` } @@ -195,8 +211,10 @@ func (s *ParentTaskInfo) UnmarshalJSON(data []byte) error { // NewParentTaskInfo returns a ParentTaskInfo. func NewParentTaskInfo() *ParentTaskInfo { r := &ParentTaskInfo{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/passthroughinferenceoptions.go b/typedapi/types/passthroughinferenceoptions.go index aefcb1287c..1b17f444c7 100644 --- a/typedapi/types/passthroughinferenceoptions.go +++ b/typedapi/types/passthroughinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PassThroughInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L224-L231 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L208-L215 type PassThroughInferenceOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
@@ -89,3 +89,13 @@ func NewPassThroughInferenceOptions() *PassThroughInferenceOptions { return r } + +// true + +type PassThroughInferenceOptionsVariant interface { + PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions +} + +func (s *PassThroughInferenceOptions) PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions { + return s +} diff --git a/typedapi/types/passthroughinferenceupdateoptions.go b/typedapi/types/passthroughinferenceupdateoptions.go index fc4c6fad58..0737b813b6 100644 --- a/typedapi/types/passthroughinferenceupdateoptions.go +++ b/typedapi/types/passthroughinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PassThroughInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L385-L390 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L373-L378 type PassThroughInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
@@ -83,3 +83,13 @@ func NewPassThroughInferenceUpdateOptions() *PassThroughInferenceUpdateOptions { return r } + +// true + +type PassThroughInferenceUpdateOptionsVariant interface { + PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions +} + +func (s *PassThroughInferenceUpdateOptions) PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/passthroughobjectproperty.go b/typedapi/types/passthroughobjectproperty.go new file mode 100644 index 0000000000..fafd32f57b --- /dev/null +++ b/typedapi/types/passthroughobjectproperty.go @@ -0,0 +1,891 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +// PassthroughObjectProperty type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/complex.ts#L52-L57 +type PassthroughObjectProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Priority *int `json:"priority,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PassthroughObjectProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case 
"fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } 
+ s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "priority": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int(v) + s.Priority = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + 
localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := 
NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) 
+ } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} 
+ +// MarshalJSON override marshalling to include literal value +func (s PassthroughObjectProperty) MarshalJSON() ([]byte, error) { + type innerPassthroughObjectProperty PassthroughObjectProperty + tmp := innerPassthroughObjectProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Priority: s.Priority, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "passthrough" + + return json.Marshal(tmp) +} + +// NewPassthroughObjectProperty returns a PassthroughObjectProperty. +func NewPassthroughObjectProperty() *PassthroughObjectProperty { + r := &PassthroughObjectProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +// true + +type PassthroughObjectPropertyVariant interface { + PassthroughObjectPropertyCaster() *PassthroughObjectProperty +} + +func (s *PassthroughObjectProperty) PassthroughObjectPropertyCaster() *PassthroughObjectProperty { + return s +} diff --git a/typedapi/types/pathhierarchytokenizer.go b/typedapi/types/pathhierarchytokenizer.go index f6df339380..3dea5fc3d5 100644 --- a/typedapi/types/pathhierarchytokenizer.go +++ b/typedapi/types/pathhierarchytokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PathHierarchyTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L95-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L95-L102 type PathHierarchyTokenizer struct { BufferSize Stringifiedinteger `json:"buffer_size,omitempty"` Delimiter *string `json:"delimiter,omitempty"` @@ -135,3 +135,13 @@ func NewPathHierarchyTokenizer() *PathHierarchyTokenizer { return r } + +// true + +type PathHierarchyTokenizerVariant interface { + PathHierarchyTokenizerCaster() *PathHierarchyTokenizer +} + +func (s *PathHierarchyTokenizer) PathHierarchyTokenizerCaster() *PathHierarchyTokenizer { + return s +} diff --git a/typedapi/types/patternanalyzer.go b/typedapi/types/patternanalyzer.go index ae2e84f6e7..946c9230ee 100644 --- a/typedapi/types/patternanalyzer.go +++ b/typedapi/types/patternanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,14 +31,25 @@ import ( // PatternAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L320-L327 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L332-L365 type PatternAnalyzer struct { - Flags *string `json:"flags,omitempty"` - Lowercase *bool `json:"lowercase,omitempty"` - Pattern string `json:"pattern"` + // Flags Java regular expression flags. Flags should be pipe-separated, eg + // "CASE_INSENSITIVE|COMMENTS". 
+ Flags *string `json:"flags,omitempty"` + // Lowercase Should terms be lowercased or not. + // Defaults to `true`. + Lowercase *bool `json:"lowercase,omitempty"` + // Pattern A Java regular expression. + // Defaults to `\W+`. + Pattern *string `json:"pattern,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. Stopwords []string `json:"stopwords,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + // StopwordsPath The path to a file containing stop words. + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` } func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { @@ -92,7 +103,7 @@ func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Pattern = o + s.Pattern = &o case "stopwords": rawMsg := json.RawMessage{} @@ -110,6 +121,18 @@ func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { } } + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -129,12 +152,13 @@ func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { func (s PatternAnalyzer) MarshalJSON() ([]byte, error) { type innerPatternAnalyzer PatternAnalyzer tmp := innerPatternAnalyzer{ - Flags: s.Flags, - Lowercase: s.Lowercase, - Pattern: s.Pattern, - Stopwords: s.Stopwords, - Type: s.Type, - Version: s.Version, + Flags: s.Flags, + Lowercase: s.Lowercase, + Pattern: s.Pattern, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, } tmp.Type = "pattern" @@ -148,3 
+172,13 @@ func NewPatternAnalyzer() *PatternAnalyzer { return r } + +// true + +type PatternAnalyzerVariant interface { + PatternAnalyzerCaster() *PatternAnalyzer +} + +func (s *PatternAnalyzer) PatternAnalyzerCaster() *PatternAnalyzer { + return s +} diff --git a/typedapi/types/patterncapturetokenfilter.go b/typedapi/types/patterncapturetokenfilter.go index f8101da327..04f45bff5a 100644 --- a/typedapi/types/patterncapturetokenfilter.go +++ b/typedapi/types/patterncapturetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // PatternCaptureTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L279-L283 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L279-L283 type PatternCaptureTokenFilter struct { Patterns []string `json:"patterns"` PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` @@ -99,3 +99,13 @@ func NewPatternCaptureTokenFilter() *PatternCaptureTokenFilter { return r } + +// true + +type PatternCaptureTokenFilterVariant interface { + PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter +} + +func (s *PatternCaptureTokenFilter) PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter { + return s +} diff --git a/typedapi/types/patternreplacecharfilter.go b/typedapi/types/patternreplacecharfilter.go index 9aef72550d..6ebc988c33 100644 --- a/typedapi/types/patternreplacecharfilter.go +++ b/typedapi/types/patternreplacecharfilter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PatternReplaceCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/char_filters.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/char_filters.ts#L57-L62 type PatternReplaceCharFilter struct { Flags *string `json:"flags,omitempty"` Pattern string `json:"pattern"` @@ -128,3 +128,13 @@ func NewPatternReplaceCharFilter() *PatternReplaceCharFilter { return r } + +// true + +type PatternReplaceCharFilterVariant interface { + PatternReplaceCharFilterCaster() *PatternReplaceCharFilter +} + +func (s *PatternReplaceCharFilter) PatternReplaceCharFilterCaster() *PatternReplaceCharFilter { + return s +} diff --git a/typedapi/types/patternreplacetokenfilter.go b/typedapi/types/patternreplacetokenfilter.go index 471657b183..3fabbd686b 100644 --- a/typedapi/types/patternreplacetokenfilter.go +++ b/typedapi/types/patternreplacetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PatternReplaceTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L285-L291 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L285-L291 type PatternReplaceTokenFilter struct { All *bool `json:"all,omitempty"` Flags *string `json:"flags,omitempty"` @@ -144,3 +144,13 @@ func NewPatternReplaceTokenFilter() *PatternReplaceTokenFilter { return r } + +// true + +type PatternReplaceTokenFilterVariant interface { + PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter +} + +func (s *PatternReplaceTokenFilter) PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter { + return s +} diff --git a/typedapi/types/patterntokenizer.go b/typedapi/types/patterntokenizer.go index 7e44e1ac43..823f928b7b 100644 --- a/typedapi/types/patterntokenizer.go +++ b/typedapi/types/patterntokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PatternTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L104-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L104-L109 type PatternTokenizer struct { Flags *string `json:"flags,omitempty"` Group *int `json:"group,omitempty"` @@ -132,3 +132,13 @@ func NewPatternTokenizer() *PatternTokenizer { return r } + +// true + +type PatternTokenizerVariant interface { + PatternTokenizerCaster() *PatternTokenizer +} + +func (s *PatternTokenizer) PatternTokenizerCaster() *PatternTokenizer { + return s +} diff --git a/typedapi/types/pendingtask.go b/typedapi/types/pendingtask.go index 681d7dd487..ae30635218 100644 --- a/typedapi/types/pendingtask.go +++ b/typedapi/types/pendingtask.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PendingTask type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/pending_tasks/types.ts#L23-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/pending_tasks/types.ts#L23-L47 type PendingTask struct { // Executing Indicates whether the pending tasks are currently executing or not. 
Executing bool `json:"executing"` @@ -142,3 +142,5 @@ func NewPendingTask() *PendingTask { return r } + +// false diff --git a/typedapi/types/pendingtasksrecord.go b/typedapi/types/pendingtasksrecord.go index 2f8142b440..b5b544011b 100644 --- a/typedapi/types/pendingtasksrecord.go +++ b/typedapi/types/pendingtasksrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PendingTasksRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/pending_tasks/types.ts#L20-L41 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/pending_tasks/types.ts#L20-L41 type PendingTasksRecord struct { // InsertOrder The task insertion order. InsertOrder *string `json:"insertOrder,omitempty"` @@ -117,3 +117,5 @@ func NewPendingTasksRecord() *PendingTasksRecord { return r } + +// false diff --git a/typedapi/types/percentage.go b/typedapi/types/percentage.go index 83f6ac767d..c5c33ea4c5 100644 --- a/typedapi/types/percentage.go +++ b/typedapi/types/percentage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // float32 // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Numeric.ts#L28-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Numeric.ts#L28-L28 type Percentage any + +type PercentageVariant interface { + PercentageCaster() *Percentage +} diff --git a/typedapi/types/percentagescoreheuristic.go b/typedapi/types/percentagescoreheuristic.go index 7e75443eab..6581547b78 100644 --- a/typedapi/types/percentagescoreheuristic.go +++ b/typedapi/types/percentagescoreheuristic.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // PercentageScoreHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L811-L811 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L811-L811 type PercentageScoreHeuristic struct { } @@ -32,3 +32,13 @@ func NewPercentageScoreHeuristic() *PercentageScoreHeuristic { return r } + +// true + +type PercentageScoreHeuristicVariant interface { + PercentageScoreHeuristicCaster() *PercentageScoreHeuristic +} + +func (s *PercentageScoreHeuristic) PercentageScoreHeuristicCaster() *PercentageScoreHeuristic { + return s +} diff --git a/typedapi/types/percentileranksaggregation.go b/typedapi/types/percentileranksaggregation.go index 21062e69f7..f3aec8578a 100644 --- a/typedapi/types/percentileranksaggregation.go +++ b/typedapi/types/percentileranksaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PercentileRanksAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L180-L202 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L180-L202 type PercentileRanksAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewPercentileRanksAggregation() *PercentileRanksAggregation { return r } + +// true + +type PercentileRanksAggregationVariant interface { + PercentileRanksAggregationCaster() *PercentileRanksAggregation +} + +func (s *PercentileRanksAggregation) PercentileRanksAggregationCaster() *PercentileRanksAggregation { + return s +} diff --git a/typedapi/types/percentiles.go b/typedapi/types/percentiles.go index 634fa4c959..4a4125e561 100644 --- a/typedapi/types/percentiles.go +++ b/typedapi/types/percentiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // KeyedPercentiles // []ArrayPercentilesItem // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L152-L153 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L152-L153 type Percentiles any diff --git a/typedapi/types/percentilesaggregation.go b/typedapi/types/percentilesaggregation.go index f1e455ccf0..edb645b64c 100644 --- a/typedapi/types/percentilesaggregation.go +++ b/typedapi/types/percentilesaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PercentilesAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L204-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L204-L223 type PercentilesAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewPercentilesAggregation() *PercentilesAggregation { return r } + +// true + +type PercentilesAggregationVariant interface { + PercentilesAggregationCaster() *PercentilesAggregation +} + +func (s *PercentilesAggregation) PercentilesAggregationCaster() *PercentilesAggregation { + return s +} diff --git a/typedapi/types/percentilesbucketaggregate.go b/typedapi/types/percentilesbucketaggregate.go index 53e0e91dad..ab44cbf93a 100644 --- a/typedapi/types/percentilesbucketaggregate.go +++ b/typedapi/types/percentilesbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // PercentilesBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L180-L181 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L180-L181 type PercentilesBucketAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *PercentilesBucketAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewPercentilesBucketAggregate() *PercentilesBucketAggregate { return r } + +// false diff --git a/typedapi/types/percentilesbucketaggregation.go b/typedapi/types/percentilesbucketaggregation.go index efcba45b17..d4b760edf6 100644 --- a/typedapi/types/percentilesbucketaggregation.go +++ b/typedapi/types/percentilesbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // PercentilesBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L389-L397 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L389-L397 type PercentilesBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewPercentilesBucketAggregation() *PercentilesBucketAggregation { return r } + +// true + +type PercentilesBucketAggregationVariant interface { + PercentilesBucketAggregationCaster() *PercentilesBucketAggregation +} + +func (s *PercentilesBucketAggregation) PercentilesBucketAggregationCaster() *PercentilesBucketAggregation { + return s +} diff --git a/typedapi/types/percolatequery.go b/typedapi/types/percolatequery.go index 391829f354..53f85c684a 100644 --- a/typedapi/types/percolatequery.go +++ b/typedapi/types/percolatequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PercolateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L205-L245 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L205-L245 type PercolateQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -175,3 +175,13 @@ func NewPercolateQuery() *PercolateQuery { return r } + +// true + +type PercolateQueryVariant interface { + PercolateQueryCaster() *PercolateQuery +} + +func (s *PercolateQuery) PercolateQueryCaster() *PercolateQuery { + return s +} diff --git a/typedapi/types/percolatorproperty.go b/typedapi/types/percolatorproperty.go index 702c859002..83350d7c45 100644 --- a/typedapi/types/percolatorproperty.go +++ b/typedapi/types/percolatorproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,19 +29,21 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // PercolatorProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L188-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L192-L194 type PercolatorProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { @@ -83,301 +85,313 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -426,306 +440,323 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -740,12 +771,13 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { func (s PercolatorProperty) MarshalJSON() ([]byte, error) { type innerPercolatorProperty PercolatorProperty tmp := innerPercolatorProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "percolator" @@ -756,10 +788,20 @@ func (s PercolatorProperty) MarshalJSON() ([]byte, error) { // NewPercolatorProperty returns a PercolatorProperty. 
func NewPercolatorProperty() *PercolatorProperty { r := &PercolatorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type PercolatorPropertyVariant interface { + PercolatorPropertyCaster() *PercolatorProperty +} + +func (s *PercolatorProperty) PercolatorPropertyCaster() *PercolatorProperty { + return s +} diff --git a/typedapi/types/perpartitioncategorization.go b/typedapi/types/perpartitioncategorization.go index 37f9473f0e..ef911910f4 100644 --- a/typedapi/types/perpartitioncategorization.go +++ b/typedapi/types/perpartitioncategorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PerPartitionCategorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Analysis.ts#L150-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Analysis.ts#L150-L159 type PerPartitionCategorization struct { // Enabled To enable this setting, you must also set the `partition_field_name` property // to the same value in every detector that uses the keyword `mlcategory`. 
@@ -100,3 +100,13 @@ func NewPerPartitionCategorization() *PerPartitionCategorization { return r } + +// true + +type PerPartitionCategorizationVariant interface { + PerPartitionCategorizationCaster() *PerPartitionCategorization +} + +func (s *PerPartitionCategorization) PerPartitionCategorizationCaster() *PerPartitionCategorization { + return s +} diff --git a/typedapi/types/persiananalyzer.go b/typedapi/types/persiananalyzer.go index 37f478303f..7440dbfae7 100644 --- a/typedapi/types/persiananalyzer.go +++ b/typedapi/types/persiananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PersianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L244-L248 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L255-L259 type PersianAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewPersianAnalyzer() *PersianAnalyzer { return r } + +// true + +type PersianAnalyzerVariant interface { + PersianAnalyzerCaster() *PersianAnalyzer +} + +func (s *PersianAnalyzer) PersianAnalyzerCaster() *PersianAnalyzer { + return s +} diff --git a/typedapi/types/persistenttaskstatus.go b/typedapi/types/persistenttaskstatus.go index 83cb71f3ca..83c1cdb9c5 100644 --- a/typedapi/types/persistenttaskstatus.go +++ b/typedapi/types/persistenttaskstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // PersistentTaskStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 type PersistentTaskStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewPersistentTaskStatus() *PersistentTaskStatus { return r } + +// false diff --git a/typedapi/types/phase.go b/typedapi/types/phase.go index 203f26e724..4795059165 100644 --- a/typedapi/types/phase.go +++ b/typedapi/types/phase.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,10 +30,10 @@ import ( // Phase type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L26-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L26-L29 type Phase struct { Actions *IlmActions `json:"actions,omitempty"` - MinAge *Duration `json:"min_age,omitempty"` + MinAge Duration `json:"min_age,omitempty"` } func (s *Phase) UnmarshalJSON(data []byte) error { @@ -72,3 +72,13 @@ func NewPhase() *Phase { return r } + +// true + +type PhaseVariant interface { + PhaseCaster() *Phase +} + +func (s *Phase) PhaseCaster() *Phase { + return s +} diff --git a/typedapi/types/phases.go b/typedapi/types/phases.go index 3757e87fc7..41458355ae 100644 --- a/typedapi/types/phases.go +++ b/typedapi/types/phases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Phases type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L31-L37 type Phases struct { Cold *Phase `json:"cold,omitempty"` Delete *Phase `json:"delete,omitempty"` @@ -37,3 +37,13 @@ func NewPhases() *Phases { return r } + +// true + +type PhasesVariant interface { + PhasesCaster() *Phases +} + +func (s *Phases) PhasesCaster() *Phases { + return s +} diff --git a/typedapi/types/phonetictokenfilter.go b/typedapi/types/phonetictokenfilter.go index ac7f740594..6ef066cdd8 100644 --- a/typedapi/types/phonetictokenfilter.go +++ b/typedapi/types/phonetictokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,7 +36,7 @@ import ( // PhoneticTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/phonetic-plugin.ts#L64-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/phonetic-plugin.ts#L64-L72 type PhoneticTokenFilter struct { Encoder phoneticencoder.PhoneticEncoder `json:"encoder"` Languageset []phoneticlanguage.PhoneticLanguage `json:"languageset,omitempty"` @@ -164,3 +164,13 @@ func NewPhoneticTokenFilter() *PhoneticTokenFilter { return r } + +// true + +type PhoneticTokenFilterVariant interface { + PhoneticTokenFilterCaster() *PhoneticTokenFilter +} + +func (s *PhoneticTokenFilter) PhoneticTokenFilterCaster() *PhoneticTokenFilter { + return s +} diff --git a/typedapi/types/phrasesuggest.go b/typedapi/types/phrasesuggest.go index ba42a4092f..c0a2abad78 100644 --- a/typedapi/types/phrasesuggest.go +++ b/typedapi/types/phrasesuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L57-L62 type PhraseSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewPhraseSuggest() *PhraseSuggest { return r } + +// false diff --git a/typedapi/types/phrasesuggestcollate.go b/typedapi/types/phrasesuggestcollate.go index 959d38f4cf..85a24b4739 100644 --- a/typedapi/types/phrasesuggestcollate.go +++ b/typedapi/types/phrasesuggestcollate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestCollate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L333-L346 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L333-L346 type PhraseSuggestCollate struct { // Params Parameters to use if the query is templated. Params map[string]json.RawMessage `json:"params,omitempty"` @@ -92,8 +92,18 @@ func (s *PhraseSuggestCollate) UnmarshalJSON(data []byte) error { // NewPhraseSuggestCollate returns a PhraseSuggestCollate. 
func NewPhraseSuggestCollate() *PhraseSuggestCollate { r := &PhraseSuggestCollate{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type PhraseSuggestCollateVariant interface { + PhraseSuggestCollateCaster() *PhraseSuggestCollate +} + +func (s *PhraseSuggestCollate) PhraseSuggestCollateCaster() *PhraseSuggestCollate { + return s +} diff --git a/typedapi/types/phrasesuggestcollatequery.go b/typedapi/types/phrasesuggestcollatequery.go index 8fef219e86..91aac7b080 100644 --- a/typedapi/types/phrasesuggestcollatequery.go +++ b/typedapi/types/phrasesuggestcollatequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestCollateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L348-L357 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L348-L357 type PhraseSuggestCollateQuery struct { // Id The search template ID. 
Id *string `json:"id,omitempty"` @@ -82,3 +82,13 @@ func NewPhraseSuggestCollateQuery() *PhraseSuggestCollateQuery { return r } + +// true + +type PhraseSuggestCollateQueryVariant interface { + PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery +} + +func (s *PhraseSuggestCollateQuery) PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery { + return s +} diff --git a/typedapi/types/phrasesuggester.go b/typedapi/types/phrasesuggester.go index ce0024e28c..ef1b7e618d 100644 --- a/typedapi/types/phrasesuggester.go +++ b/typedapi/types/phrasesuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L359-L417 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L359-L417 type PhraseSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. @@ -299,3 +299,13 @@ func NewPhraseSuggester() *PhraseSuggester { return r } + +// true + +type PhraseSuggesterVariant interface { + PhraseSuggesterCaster() *PhraseSuggester +} + +func (s *PhraseSuggester) PhraseSuggesterCaster() *PhraseSuggester { + return s +} diff --git a/typedapi/types/phrasesuggesthighlight.go b/typedapi/types/phrasesuggesthighlight.go index 48fbab3193..edcddf8814 100644 --- a/typedapi/types/phrasesuggesthighlight.go +++ b/typedapi/types/phrasesuggesthighlight.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestHighlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L419-L428 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L419-L428 type PhraseSuggestHighlight struct { // PostTag Use in conjunction with `pre_tag` to define the HTML tags to use for the // highlighted text. @@ -91,3 +91,13 @@ func NewPhraseSuggestHighlight() *PhraseSuggestHighlight { return r } + +// true + +type PhraseSuggestHighlightVariant interface { + PhraseSuggestHighlightCaster() *PhraseSuggestHighlight +} + +func (s *PhraseSuggestHighlight) PhraseSuggestHighlightCaster() *PhraseSuggestHighlight { + return s +} diff --git a/typedapi/types/phrasesuggestoption.go b/typedapi/types/phrasesuggestoption.go index 4bfd7c8d1f..761cb7ebe1 100644 --- a/typedapi/types/phrasesuggestoption.go +++ b/typedapi/types/phrasesuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L86-L91 type PhraseSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Highlighted *string `json:"highlighted,omitempty"` @@ -119,3 +119,5 @@ func NewPhraseSuggestOption() *PhraseSuggestOption { return r } + +// false diff --git a/typedapi/types/pinneddoc.go b/typedapi/types/pinneddoc.go index 02147a2a08..fb62b5a7df 100644 --- a/typedapi/types/pinneddoc.go +++ b/typedapi/types/pinneddoc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,12 @@ import ( // PinnedDoc type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L269-L278 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L269-L278 type PinnedDoc struct { // Id_ The unique document ID. Id_ string `json:"_id"` // Index_ The index that contains the document. 
- Index_ string `json:"_index"` + Index_ *string `json:"_index,omitempty"` } func (s *PinnedDoc) UnmarshalJSON(data []byte) error { @@ -74,3 +74,13 @@ func NewPinnedDoc() *PinnedDoc { return r } + +// true + +type PinnedDocVariant interface { + PinnedDocCaster() *PinnedDoc +} + +func (s *PinnedDoc) PinnedDocCaster() *PinnedDoc { + return s +} diff --git a/typedapi/types/pinnedquery.go b/typedapi/types/pinnedquery.go index f46893801f..f9dcf0a6a2 100644 --- a/typedapi/types/pinnedquery.go +++ b/typedapi/types/pinnedquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,9 @@ import ( // PinnedQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L247-L267 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L247-L267 type PinnedQuery struct { + AdditionalPinnedQueryProperty map[string]json.RawMessage `json:"-"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -47,7 +48,7 @@ type PinnedQuery struct { Ids []string `json:"ids,omitempty"` // Organic Any choice of query used to rank documents which will be ranked below the // "pinned" documents. 
- Organic *Query `json:"organic,omitempty"` + Organic Query `json:"organic"` QueryName_ *string `json:"_name,omitempty"` } @@ -109,14 +110,68 @@ func (s *PinnedQuery) UnmarshalJSON(data []byte) error { } s.QueryName_ = &o + default: + + if key, ok := t.(string); ok { + if s.AdditionalPinnedQueryProperty == nil { + s.AdditionalPinnedQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalPinnedQueryProperty", err) + } + s.AdditionalPinnedQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s PinnedQuery) MarshalJSON() ([]byte, error) { + type opt PinnedQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPinnedQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPinnedQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewPinnedQuery returns a PinnedQuery. func NewPinnedQuery() *PinnedQuery { - r := &PinnedQuery{} + r := &PinnedQuery{ + AdditionalPinnedQueryProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PinnedQueryVariant interface { + PinnedQueryCaster() *PinnedQuery +} + +func (s *PinnedQuery) PinnedQueryCaster() *PinnedQuery { + return s +} diff --git a/typedapi/types/pipelineconfig.go b/typedapi/types/pipelineconfig.go index b36488b2a6..46607ed663 100644 --- a/typedapi/types/pipelineconfig.go +++ b/typedapi/types/pipelineconfig.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PipelineConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Pipeline.ts#L67-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Pipeline.ts#L67-L81 type PipelineConfig struct { // Description Description of the ingest pipeline. Description *string `json:"description,omitempty"` @@ -90,3 +90,5 @@ func NewPipelineConfig() *PipelineConfig { return r } + +// false diff --git a/typedapi/types/pipelinemetadata.go b/typedapi/types/pipelinemetadata.go index 620cc262de..a0c7a7e5b1 100644 --- a/typedapi/types/pipelinemetadata.go +++ b/typedapi/types/pipelinemetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PipelineMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/logstash/_types/Pipeline.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/logstash/_types/Pipeline.ts#L23-L26 type PipelineMetadata struct { Type string `json:"type"` Version string `json:"version"` @@ -87,3 +87,13 @@ func NewPipelineMetadata() *PipelineMetadata { return r } + +// true + +type PipelineMetadataVariant interface { + PipelineMetadataCaster() *PipelineMetadata +} + +func (s *PipelineMetadata) PipelineMetadataCaster() *PipelineMetadata { + return s +} diff --git a/typedapi/types/pipelineprocessor.go b/typedapi/types/pipelineprocessor.go index 4b19faaea6..7d1d175108 100644 --- a/typedapi/types/pipelineprocessor.go +++ b/typedapi/types/pipelineprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,13 @@ import ( // PipelineProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1226-L1237 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1267-L1278 type PipelineProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissingPipeline Whether to ignore missing pipelines instead of failing. @@ -80,16 +80,9 @@ func (s *PipelineProcessor) UnmarshalJSON(data []byte) error { s.Description = &o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -152,3 +145,13 @@ func NewPipelineProcessor() *PipelineProcessor { return r } + +// true + +type PipelineProcessorVariant interface { + PipelineProcessorCaster() *PipelineProcessor +} + +func (s *PipelineProcessor) PipelineProcessorCaster() *PipelineProcessor { + return s +} diff --git a/typedapi/types/pipelinesettings.go b/typedapi/types/pipelinesettings.go index ea7263a653..2cc780ef1a 100644 --- a/typedapi/types/pipelinesettings.go +++ b/typedapi/types/pipelinesettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PipelineSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/logstash/_types/Pipeline.ts#L28-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/logstash/_types/Pipeline.ts#L28-L59 type PipelineSettings struct { // PipelineBatchDelay When creating pipeline event batches, how long in milliseconds to wait for // each event before dispatching an undersized batch to pipeline workers. 
@@ -184,3 +184,13 @@ func NewPipelineSettings() *PipelineSettings { return r } + +// true + +type PipelineSettingsVariant interface { + PipelineSettingsCaster() *PipelineSettings +} + +func (s *PipelineSettings) PipelineSettingsCaster() *PipelineSettings { + return s +} diff --git a/typedapi/types/pipelinesimulation.go b/typedapi/types/pipelinesimulation.go index d4d2e381ca..8512f15e13 100644 --- a/typedapi/types/pipelinesimulation.go +++ b/typedapi/types/pipelinesimulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // PipelineSimulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L52-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L52-L60 type PipelineSimulation struct { Description *string `json:"description,omitempty"` Doc *DocumentSimulation `json:"doc,omitempty"` @@ -126,3 +126,5 @@ func NewPipelineSimulation() *PipelineSimulation { return r } + +// false diff --git a/typedapi/types/pipeseparatedflagssimplequerystringflag.go b/typedapi/types/pipeseparatedflagssimplequerystringflag.go index a56c079df4..a322896285 100644 --- a/typedapi/types/pipeseparatedflagssimplequerystringflag.go +++ b/typedapi/types/pipeseparatedflagssimplequerystringflag.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // simplequerystringflag.SimpleQueryStringFlag // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 type PipeSeparatedFlagsSimpleQueryStringFlag any + +type PipeSeparatedFlagsSimpleQueryStringFlagVariant interface { + PipeSeparatedFlagsSimpleQueryStringFlagCaster() *PipeSeparatedFlagsSimpleQueryStringFlag +} diff --git a/typedapi/types/pivot.go b/typedapi/types/pivot.go index 5013839202..fff8635dc1 100644 --- a/typedapi/types/pivot.go +++ b/typedapi/types/pivot.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Pivot type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L54-L68 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L54-L68 type Pivot struct { // Aggregations Defines how to aggregate the grouped data. The following aggregations are // currently supported: average, bucket @@ -41,9 +41,19 @@ type Pivot struct { // NewPivot returns a Pivot. 
func NewPivot() *Pivot { r := &Pivot{ - Aggregations: make(map[string]Aggregations, 0), - GroupBy: make(map[string]PivotGroupByContainer, 0), + Aggregations: make(map[string]Aggregations), + GroupBy: make(map[string]PivotGroupByContainer), } return r } + +// true + +type PivotVariant interface { + PivotCaster() *Pivot +} + +func (s *Pivot) PivotCaster() *Pivot { + return s +} diff --git a/typedapi/types/pivotgroupbycontainer.go b/typedapi/types/pivotgroupbycontainer.go index 50b7100c8d..bc300e329f 100644 --- a/typedapi/types/pivotgroupbycontainer.go +++ b/typedapi/types/pivotgroupbycontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // PivotGroupByContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L70-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L70-L78 type PivotGroupByContainer struct { - DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` - GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` - Histogram *HistogramAggregation `json:"histogram,omitempty"` - Terms *TermsAggregation `json:"terms,omitempty"` + AdditionalPivotGroupByContainerProperty map[string]json.RawMessage `json:"-"` + DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` + GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` + Histogram *HistogramAggregation `json:"histogram,omitempty"` + Terms *TermsAggregation `json:"terms,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s PivotGroupByContainer) MarshalJSON() ([]byte, error) { + type opt PivotGroupByContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPivotGroupByContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPivotGroupByContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewPivotGroupByContainer returns a PivotGroupByContainer. 
func NewPivotGroupByContainer() *PivotGroupByContainer { - r := &PivotGroupByContainer{} + r := &PivotGroupByContainer{ + AdditionalPivotGroupByContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PivotGroupByContainerVariant interface { + PivotGroupByContainerCaster() *PivotGroupByContainer +} + +func (s *PivotGroupByContainer) PivotGroupByContainerCaster() *PivotGroupByContainer { + return s +} diff --git a/typedapi/types/pluginsrecord.go b/typedapi/types/pluginsrecord.go index fa2f93d8cf..1d45b8a5fa 100644 --- a/typedapi/types/pluginsrecord.go +++ b/typedapi/types/pluginsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PluginsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/plugins/types.ts#L22-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/plugins/types.ts#L22-L52 type PluginsRecord struct { // Component The component name. Component *string `json:"component,omitempty"` @@ -124,3 +124,5 @@ func NewPluginsRecord() *PluginsRecord { return r } + +// false diff --git a/typedapi/types/pluginsstatus.go b/typedapi/types/pluginsstatus.go index 29cd86ccb6..622c1b738c 100644 --- a/typedapi/types/pluginsstatus.go +++ b/typedapi/types/pluginsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // PluginsStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 type PluginsStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewPluginsStatus() *PluginsStatus { return r } + +// false diff --git a/typedapi/types/pluginstats.go b/typedapi/types/pluginstats.go index 47049f10b7..903340bebd 100644 --- a/typedapi/types/pluginstats.go +++ b/typedapi/types/pluginstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PluginStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L180-L190 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L183-L193 type PluginStats struct { Classname string `json:"classname"` Description string `json:"description"` @@ -147,3 +147,5 @@ func NewPluginStats() *PluginStats { return r } + +// false diff --git a/typedapi/types/pointintimereference.go b/typedapi/types/pointintimereference.go index 228223aafb..ac7398ac0b 100644 --- a/typedapi/types/pointintimereference.go +++ b/typedapi/types/pointintimereference.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // PointInTimeReference type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 type PointInTimeReference struct { Id string `json:"id"` KeepAlive Duration `json:"keep_alive,omitempty"` @@ -72,3 +72,13 @@ func NewPointInTimeReference() *PointInTimeReference { return r } + +// true + +type PointInTimeReferenceVariant interface { + PointInTimeReferenceCaster() *PointInTimeReference +} + +func (s *PointInTimeReference) PointInTimeReferenceCaster() *PointInTimeReference { + return s +} diff --git a/typedapi/types/pointproperty.go b/typedapi/types/pointproperty.go index 0bd2199f32..16474f5a4a 100644 --- a/typedapi/types/pointproperty.go +++ b/typedapi/types/pointproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // PointProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L66-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L66-L71 type PointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -43,11 +44,12 @@ type PointProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *PointProperty) UnmarshalJSON(data []byte) error { @@ -119,301 +121,313 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -502,301 +516,313 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -816,6 +842,11 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -830,18 +861,19 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { func (s PointProperty) MarshalJSON() ([]byte, error) { type innerPointProperty PointProperty tmp := innerPointProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - 
Meta: s.Meta, - NullValue: s.NullValue, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "point" @@ -852,10 +884,20 @@ func (s PointProperty) MarshalJSON() ([]byte, error) { // NewPointProperty returns a PointProperty. func NewPointProperty() *PointProperty { r := &PointProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type PointPropertyVariant interface { + PointPropertyCaster() *PointProperty +} + +func (s *PointProperty) PointPropertyCaster() *PointProperty { + return s +} diff --git a/typedapi/types/pool.go b/typedapi/types/pool.go index 120c840e05..6217215240 100644 --- a/typedapi/types/pool.go +++ b/typedapi/types/pool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Pool type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L949-L966 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L949-L966 type Pool struct { // MaxInBytes Maximum amount of memory, in bytes, available for use by the heap. 
MaxInBytes *int64 `json:"max_in_bytes,omitempty"` @@ -129,3 +129,5 @@ func NewPool() *Pool { return r } + +// false diff --git a/typedapi/types/porterstemtokenfilter.go b/typedapi/types/porterstemtokenfilter.go index 50da4fc3a6..01cf300566 100644 --- a/typedapi/types/porterstemtokenfilter.go +++ b/typedapi/types/porterstemtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // PorterStemTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L293-L295 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L293-L295 type PorterStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewPorterStemTokenFilter() *PorterStemTokenFilter { return r } + +// true + +type PorterStemTokenFilterVariant interface { + PorterStemTokenFilterCaster() *PorterStemTokenFilter +} + +func (s *PorterStemTokenFilter) PorterStemTokenFilterCaster() *PorterStemTokenFilter { + return s +} diff --git a/typedapi/types/portugueseanalyzer.go b/typedapi/types/portugueseanalyzer.go index a70e66ee93..129440c36c 100644 --- a/typedapi/types/portugueseanalyzer.go +++ b/typedapi/types/portugueseanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PortugueseAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L250-L255 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L261-L266 type PortugueseAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewPortugueseAnalyzer() *PortugueseAnalyzer { return r } + +// true + +type PortugueseAnalyzerVariant interface { + PortugueseAnalyzerCaster() *PortugueseAnalyzer +} + +func (s *PortugueseAnalyzer) PortugueseAnalyzerCaster() *PortugueseAnalyzer { + return s +} diff --git a/typedapi/types/postmigrationfeature.go b/typedapi/types/postmigrationfeature.go index 38666133bf..bb59659f4c 100644 --- a/typedapi/types/postmigrationfeature.go +++ b/typedapi/types/postmigrationfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PostMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 type PostMigrationFeature struct { FeatureName string `json:"feature_name"` } @@ -74,3 +74,5 @@ func NewPostMigrationFeature() *PostMigrationFeature { return r } + +// false diff --git a/typedapi/types/predicatetokenfilter.go b/typedapi/types/predicatetokenfilter.go index c33a3d3ec0..b854e33138 100644 --- a/typedapi/types/predicatetokenfilter.go +++ b/typedapi/types/predicatetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // PredicateTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L297-L300 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L297-L300 type PredicateTokenFilter struct { Script Script `json:"script"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewPredicateTokenFilter() *PredicateTokenFilter { return r } + +// true + +type PredicateTokenFilterVariant interface { + PredicateTokenFilterCaster() *PredicateTokenFilter +} + +func (s *PredicateTokenFilter) PredicateTokenFilterCaster() *PredicateTokenFilter { + return s +} diff --git a/typedapi/types/predictedvalue.go b/typedapi/types/predictedvalue.go index 8ad8961a43..5b89b5464b 100644 --- a/typedapi/types/predictedvalue.go +++ b/typedapi/types/predictedvalue.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // PredictedValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L457-L457 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L445-L445 type PredictedValue []ScalarValue diff --git a/typedapi/types/prefixquery.go b/typedapi/types/prefixquery.go index 7e36ab9cb1..edf0b690fd 100644 --- a/typedapi/types/prefixquery.go +++ b/typedapi/types/prefixquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PrefixQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L98-L120 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L98-L120 type PrefixQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -148,3 +148,13 @@ func NewPrefixQuery() *PrefixQuery { return r } + +// true + +type PrefixQueryVariant interface { + PrefixQueryCaster() *PrefixQuery +} + +func (s *PrefixQuery) PrefixQueryCaster() *PrefixQuery { + return s +} diff --git a/typedapi/types/preprocessor.go b/typedapi/types/preprocessor.go index ac616a6bcf..fd65c083ce 100644 --- a/typedapi/types/preprocessor.go +++ b/typedapi/types/preprocessor.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // Preprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L31-L36 type Preprocessor struct { - FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` - OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` - TargetMeanEncoding *TargetMeanEncodingPreprocessor `json:"target_mean_encoding,omitempty"` + AdditionalPreprocessorProperty map[string]json.RawMessage `json:"-"` + FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` + OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` + TargetMeanEncoding *TargetMeanEncodingPreprocessor `json:"target_mean_encoding,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s Preprocessor) MarshalJSON() ([]byte, error) { + type opt Preprocessor + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPreprocessorProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPreprocessorProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewPreprocessor returns a Preprocessor. 
func NewPreprocessor() *Preprocessor { - r := &Preprocessor{} + r := &Preprocessor{ + AdditionalPreprocessorProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PreprocessorVariant interface { + PreprocessorCaster() *Preprocessor +} + +func (s *Preprocessor) PreprocessorCaster() *Preprocessor { + return s +} diff --git a/typedapi/types/pressurememory.go b/typedapi/types/pressurememory.go index 9f1543004d..54dbc28f6d 100644 --- a/typedapi/types/pressurememory.go +++ b/typedapi/types/pressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L144-L199 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L144-L199 type PressureMemory struct { // All Memory consumed by indexing requests in the coordinating, primary, or replica // stage. @@ -239,3 +239,5 @@ func NewPressureMemory() *PressureMemory { return r } + +// false diff --git a/typedapi/types/privileges.go b/typedapi/types/privileges.go index 64b6920011..11e9041355 100644 --- a/typedapi/types/privileges.go +++ b/typedapi/types/privileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Privileges type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/types.ts#L48-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/types.ts#L49-L49 type Privileges map[string]bool diff --git a/typedapi/types/privilegesactions.go b/typedapi/types/privilegesactions.go index 46fc8322d9..c6ec6d6d5a 100644 --- a/typedapi/types/privilegesactions.go +++ b/typedapi/types/privilegesactions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PrivilegesActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/put_privileges/types.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/put_privileges/types.ts#L22-L27 type PrivilegesActions struct { Actions []string `json:"actions"` Application *string `json:"application,omitempty"` @@ -92,3 +92,13 @@ func NewPrivilegesActions() *PrivilegesActions { return r } + +// true + +type PrivilegesActionsVariant interface { + PrivilegesActionsCaster() *PrivilegesActions +} + +func (s *PrivilegesActions) PrivilegesActionsCaster() *PrivilegesActions { + return s +} diff --git a/typedapi/types/privilegescheck.go b/typedapi/types/privilegescheck.go index 241c999919..3ffb129983 100644 --- a/typedapi/types/privilegescheck.go +++ b/typedapi/types/privilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // PrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges_user_profile/types.ts#L30-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges_user_profile/types.ts#L30-L37 type PrivilegesCheck struct { Application []ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. @@ -40,3 +40,13 @@ func NewPrivilegesCheck() *PrivilegesCheck { return r } + +// true + +type PrivilegesCheckVariant interface { + PrivilegesCheckCaster() *PrivilegesCheck +} + +func (s *PrivilegesCheck) PrivilegesCheckCaster() *PrivilegesCheck { + return s +} diff --git a/typedapi/types/process.go b/typedapi/types/process.go index 4c0e07da26..13ca7d3fa1 100644 --- a/typedapi/types/process.go +++ b/typedapi/types/process.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Process type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1024-L1046 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1024-L1046 type Process struct { // Cpu Contains CPU statistics for the node. 
Cpu *Cpu `json:"cpu,omitempty"` @@ -131,3 +131,5 @@ func NewProcess() *Process { return r } + +// false diff --git a/typedapi/types/processor.go b/typedapi/types/processor.go index 99c6bc0076..21bd78827d 100644 --- a/typedapi/types/processor.go +++ b/typedapi/types/processor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Processor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L420-L437 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L420-L437 type Processor struct { // Count Number of documents transformed by the processor. Count *int64 `json:"count,omitempty"` @@ -119,3 +119,5 @@ func NewProcessor() *Processor { return r } + +// false diff --git a/typedapi/types/processorcontainer.go b/typedapi/types/processorcontainer.go index ec2e24e566..2e84dcc15f 100644 --- a/typedapi/types/processorcontainer.go +++ b/typedapi/types/processorcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // ProcessorContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L27-L297 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L28-L302 type ProcessorContainer struct { + AdditionalProcessorContainerProperty map[string]json.RawMessage `json:"-"` // Append Appends one or more values to an existing array if the field already exists // and it is an array. // Converts a scalar to an array and appends one or more values to it if the @@ -116,6 +122,8 @@ type ProcessorContainer struct { // language processing tasks to infer against the data that is being ingested in // the pipeline. Inference *InferenceProcessor `json:"inference,omitempty"` + // IpLocation Currently an undocumented alias for GeoIP Processor. + IpLocation *IpLocationProcessor `json:"ip_location,omitempty"` // Join Joins each element of an array into a single string using a separator // character between each element. // Throws an error when the field is not an array. 
@@ -209,9 +217,50 @@ type ProcessorContainer struct { UserAgent *UserAgentProcessor `json:"user_agent,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s ProcessorContainer) MarshalJSON() ([]byte, error) { + type opt ProcessorContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalProcessorContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalProcessorContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewProcessorContainer returns a ProcessorContainer. func NewProcessorContainer() *ProcessorContainer { - r := &ProcessorContainer{} + r := &ProcessorContainer{ + AdditionalProcessorContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type ProcessorContainerVariant interface { + ProcessorContainerCaster() *ProcessorContainer +} + +func (s *ProcessorContainer) ProcessorContainerCaster() *ProcessorContainer { + return s +} diff --git a/typedapi/types/profile.go b/typedapi/types/profile.go index 6300140dd6..08a8c0c38f 100644 --- a/typedapi/types/profile.go +++ b/typedapi/types/profile.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Profile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L101-L103 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L101-L103 type Profile struct { Shards []ShardProfile `json:"shards"` } @@ -33,3 +33,5 @@ func NewProfile() *Profile { return r } + +// false diff --git a/typedapi/types/property.go b/typedapi/types/property.go index 659586109a..428844697b 100644 --- a/typedapi/types/property.go +++ b/typedapi/types/property.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -42,10 +42,12 @@ package types // FlattenedProperty // NestedProperty // ObjectProperty +// PassthroughObjectProperty // SemanticTextProperty // SparseVectorProperty // CompletionProperty // ConstantKeywordProperty +// CountedKeywordProperty // FieldAliasProperty // HistogramProperty // IpProperty @@ -72,5 +74,9 @@ package types // LongRangeProperty // IcuCollationProperty // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/Property.ts#L96-L164 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/Property.ts#L119-L189 type Property any + +type PropertyVariant interface { + PropertyCaster() *Property +} diff --git a/typedapi/types/publishedclusterstates.go b/typedapi/types/publishedclusterstates.go index 2485841467..1ec8564c59 100644 --- a/typedapi/types/publishedclusterstates.go +++ b/typedapi/types/publishedclusterstates.go @@ -16,7 +16,7 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // PublishedClusterStates type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L263-L276 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L263-L276 type PublishedClusterStates struct { // CompatibleDiffs Number of compatible differences between published cluster states. CompatibleDiffs *int64 `json:"compatible_diffs,omitempty"` @@ -112,3 +112,5 @@ func NewPublishedClusterStates() *PublishedClusterStates { return r } + +// false diff --git a/typedapi/types/queries.go b/typedapi/types/queries.go index fee027d4a5..5ea41d6b8d 100644 --- a/typedapi/types/queries.go +++ b/typedapi/types/queries.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Queries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L403-L405 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L417-L419 type Queries struct { Cache *CacheQueries `json:"cache,omitempty"` } @@ -33,3 +33,13 @@ func NewQueries() *Queries { return r } + +// true + +type QueriesVariant interface { + QueriesCaster() *Queries +} + +func (s *Queries) QueriesCaster() *Queries { + return s +} diff --git a/typedapi/types/query.go b/typedapi/types/query.go index ac86be15ec..2e8725822c 100644 --- a/typedapi/types/query.go +++ b/typedapi/types/query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // Query type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L102-L427 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L103-L434 type Query struct { + AdditionalQueryProperty map[string]json.RawMessage `json:"-"` // Bool matches documents matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Boosting Returns documents matching a `positive` query while reducing the relevance @@ -68,7 +69,10 @@ type Query struct { // GeoDistance Matches `geo_point` and `geo_shape` values within a given distance of a // geopoint. 
GeoDistance *GeoDistanceQuery `json:"geo_distance,omitempty"` - GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` + // GeoGrid Matches `geo_point` and `geo_shape` values that intersect a grid cell from a + // GeoGrid aggregation. + GeoGrid map[string]GeoGridQuery `json:"geo_grid,omitempty"` + GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` // GeoShape Filter documents indexed using either the `geo_shape` or the `geo_point` // type. GeoShape *GeoShapeQuery `json:"geo_shape,omitempty"` @@ -290,6 +294,14 @@ func (s *Query) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "GeoDistance", err) } + case "geo_grid": + if s.GeoGrid == nil { + s.GeoGrid = make(map[string]GeoGridQuery, 0) + } + if err := dec.Decode(&s.GeoGrid); err != nil { + return fmt.Errorf("%s | %w", "GeoGrid", err) + } + case "geo_polygon": if err := dec.Decode(&s.GeoPolygon); err != nil { return fmt.Errorf("%s | %w", "GeoPolygon", err) @@ -583,31 +595,85 @@ func (s *Query) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wrapper", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalQueryProperty == nil { + s.AdditionalQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalQueryProperty", err) + } + s.AdditionalQueryProperty[key] = *raw + } + } } return nil } +// MarshalJSON overrides marshalling for types with additional properties +func (s Query) MarshalJSON() ([]byte, error) { + type opt Query + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + 
delete(tmp, "AdditionalQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewQuery returns a Query. func NewQuery() *Query { r := &Query{ - Common: make(map[string]CommonTermsQuery, 0), - Fuzzy: make(map[string]FuzzyQuery, 0), - Intervals: make(map[string]IntervalsQuery, 0), - Match: make(map[string]MatchQuery, 0), - MatchBoolPrefix: make(map[string]MatchBoolPrefixQuery, 0), - MatchPhrase: make(map[string]MatchPhraseQuery, 0), - MatchPhrasePrefix: make(map[string]MatchPhrasePrefixQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Regexp: make(map[string]RegexpQuery, 0), - SpanTerm: make(map[string]SpanTermQuery, 0), - Term: make(map[string]TermQuery, 0), - TermsSet: make(map[string]TermsSetQuery, 0), - TextExpansion: make(map[string]TextExpansionQuery, 0), - WeightedTokens: make(map[string]WeightedTokensQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalQueryProperty: make(map[string]json.RawMessage), + Common: make(map[string]CommonTermsQuery), + Fuzzy: make(map[string]FuzzyQuery), + GeoGrid: make(map[string]GeoGridQuery), + Intervals: make(map[string]IntervalsQuery), + Match: make(map[string]MatchQuery), + MatchBoolPrefix: make(map[string]MatchBoolPrefixQuery), + MatchPhrase: make(map[string]MatchPhraseQuery), + MatchPhrasePrefix: make(map[string]MatchPhrasePrefixQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Regexp: make(map[string]RegexpQuery), + SpanTerm: make(map[string]SpanTermQuery), + Term: make(map[string]TermQuery), + TermsSet: make(map[string]TermsSetQuery), + TextExpansion: make(map[string]TextExpansionQuery), + WeightedTokens: make(map[string]WeightedTokensQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type QueryVariant interface { + QueryCaster() *Query +} + +func (s *Query) QueryCaster() *Query { + return s +} diff --git 
a/typedapi/types/querybreakdown.go b/typedapi/types/querybreakdown.go index 673a6bf767..5dd1f75713 100644 --- a/typedapi/types/querybreakdown.go +++ b/typedapi/types/querybreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QueryBreakdown type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L105-L126 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L105-L126 type QueryBreakdown struct { Advance int64 `json:"advance"` AdvanceCount int64 `json:"advance_count"` @@ -381,3 +381,5 @@ func NewQueryBreakdown() *QueryBreakdown { return r } + +// false diff --git a/typedapi/types/querycachestats.go b/typedapi/types/querycachestats.go index 0443734aa1..671d5fd88e 100644 --- a/typedapi/types/querycachestats.go +++ b/typedapi/types/querycachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QueryCacheStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L192-L226 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L195-L229 type QueryCacheStats struct { // CacheCount Total number of entries added to the query cache across all shards assigned // to selected nodes. @@ -195,3 +195,5 @@ func NewQueryCacheStats() *QueryCacheStats { return r } + +// false diff --git a/typedapi/types/queryprofile.go b/typedapi/types/queryprofile.go index 7a4f76e938..c480bbb148 100644 --- a/typedapi/types/queryprofile.go +++ b/typedapi/types/queryprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QueryProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L128-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L128-L134 type QueryProfile struct { Breakdown QueryBreakdown `json:"breakdown"` Children []QueryProfile `json:"children,omitempty"` @@ -105,3 +105,5 @@ func NewQueryProfile() *QueryProfile { return r } + +// false diff --git a/typedapi/types/queryrole.go b/typedapi/types/queryrole.go index 1137d1ff97..184f37d9a4 100644 --- a/typedapi/types/queryrole.go +++ b/typedapi/types/queryrole.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // QueryRole type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_role/types.ts#L103-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_role/types.ts#L103-L109 type QueryRole struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` @@ -53,9 +53,17 @@ type QueryRole struct { Metadata Metadata `json:"metadata,omitempty"` // Name Name of the role. Name string `json:"name"` - // RunAs A list of users that the API keys can impersonate. *Note*: in Serverless, the - // run-as feature is disabled. For API compatibility, you can still specify an - // empty `run_as` field, but a non-empty list will be rejected. + // RemoteCluster A list of cluster permissions for remote clusters. + // NOTE: This is limited to a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction Restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` + // RunAs A list of users that the API keys can impersonate. + // NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + // For API compatibility, you can still specify an empty `run_as` field, but a + // non-empty list will be rejected. 
RunAs []string `json:"run_as,omitempty"` Sort_ []FieldValue `json:"_sort,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` @@ -136,6 +144,21 @@ func (s *QueryRole) UnmarshalJSON(data []byte) error { } s.Name = o + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -162,8 +185,10 @@ func (s *QueryRole) UnmarshalJSON(data []byte) error { // NewQueryRole returns a QueryRole. func NewQueryRole() *QueryRole { r := &QueryRole{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/queryrule.go b/typedapi/types/queryrule.go index e7cd93931d..4c8aef0d42 100644 --- a/typedapi/types/queryrule.go +++ b/typedapi/types/queryrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,13 +33,23 @@ import ( // QueryRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L36-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/_types/QueryRuleset.ts#L36-L58 type QueryRule struct { - Actions QueryRuleActions `json:"actions"` - Criteria []QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - RuleId string `json:"rule_id"` - Type queryruletype.QueryRuleType `json:"type"` + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. + Type queryruletype.QueryRuleType `json:"type"` } func (s *QueryRule) UnmarshalJSON(data []byte) error { @@ -115,3 +125,13 @@ func NewQueryRule() *QueryRule { return r } + +// true + +type QueryRuleVariant interface { + QueryRuleCaster() *QueryRule +} + +func (s *QueryRule) QueryRuleCaster() *QueryRule { + return s +} diff --git a/typedapi/types/queryruleactions.go b/typedapi/types/queryruleactions.go index ea3302ebbf..e8ce1f2ffa 100644 --- a/typedapi/types/queryruleactions.go +++ b/typedapi/types/queryruleactions.go @@ -16,16 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // QueryRuleActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L70-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/_types/QueryRuleset.ts#L110-L126 type QueryRuleActions struct { + // Docs The documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + // There is a maximum value of 100 documents in a rule. + // You can specify the following attributes for each document: + // + // * `_index`: The index of the document to pin. + // * `_id`: The unique document ID. Docs []PinnedDoc `json:"docs,omitempty"` - Ids []string `json:"ids,omitempty"` + // Ids The unique document IDs of the documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + Ids []string `json:"ids,omitempty"` } // NewQueryRuleActions returns a QueryRuleActions. @@ -34,3 +45,13 @@ func NewQueryRuleActions() *QueryRuleActions { return r } + +// true + +type QueryRuleActionsVariant interface { + QueryRuleActionsCaster() *QueryRuleActions +} + +func (s *QueryRuleActions) QueryRuleActionsCaster() *QueryRuleActions { + return s +} diff --git a/typedapi/types/queryrulecriteria.go b/typedapi/types/queryrulecriteria.go index a862bf7c94..30e02843ad 100644 --- a/typedapi/types/queryrulecriteria.go +++ b/typedapi/types/queryrulecriteria.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,11 +33,40 @@ import ( // QueryRuleCriteria type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L49-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/_types/QueryRuleset.ts#L65-L93 type QueryRuleCriteria struct { - Metadata *string `json:"metadata,omitempty"` - Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` - Values []json.RawMessage `json:"values,omitempty"` + // Metadata The metadata field to match against. + // This metadata will be used to match against `match_criteria` sent in the + // rule. + // It is required for all criteria types except `always`. + Metadata *string `json:"metadata,omitempty"` + // Type The type of criteria. The following criteria types are supported: + // + // * `always`: Matches all queries, regardless of input. + // * `contains`: Matches that contain this value anywhere in the field meet the + // criteria defined by the rule. Only applicable for string values. + // * `exact`: Only exact matches meet the criteria defined by the rule. + // Applicable for string or numerical values. + // * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit + // Distance meet the criteria defined by the rule. Only applicable for string + // values. + // * `gt`: Matches with a value greater than this value meet the criteria + // defined by the rule. Only applicable for numerical values. + // * `gte`: Matches with a value greater than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. 
+ // * `lt`: Matches with a value less than this value meet the criteria defined + // by the rule. Only applicable for numerical values. + // * `lte`: Matches with a value less than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. + // * `prefix`: Matches that start with this value meet the criteria defined by + // the rule. Only applicable for string values. + // * `suffix`: Matches that end with this value meet the criteria defined by the + // rule. Only applicable for string values. + Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` + // Values The values to match against the `metadata` field. + // Only one value must match for the criteria to be met. + // It is required for all criteria types except `always`. + Values []json.RawMessage `json:"values,omitempty"` } func (s *QueryRuleCriteria) UnmarshalJSON(data []byte) error { @@ -88,3 +117,13 @@ func NewQueryRuleCriteria() *QueryRuleCriteria { return r } + +// true + +type QueryRuleCriteriaVariant interface { + QueryRuleCriteriaCaster() *QueryRuleCriteria +} + +func (s *QueryRuleCriteria) QueryRuleCriteriaCaster() *QueryRuleCriteria { + return s +} diff --git a/typedapi/types/queryrulesetlistitem.go b/typedapi/types/queryrulesetlistitem.go index c00261e5dd..ef60ca75fa 100644 --- a/typedapi/types/queryrulesetlistitem.go +++ b/typedapi/types/queryrulesetlistitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,20 @@ import ( // QueryRulesetListItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/list_rulesets/types.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/list_rulesets/types.ts#L23-L44 type QueryRulesetListItem struct { - // RuleCriteriaTypesCounts A map of criteria type to the number of rules of that type + // RuleCriteriaTypesCounts A map of criteria type (for example, `exact`) to the number of rules of that + // type. + // + // NOTE: The counts in `rule_criteria_types_counts` may be larger than the value + // of `rule_total_count` because a rule may have multiple criteria. RuleCriteriaTypesCounts map[string]int `json:"rule_criteria_types_counts"` - // RuleTotalCount The number of rules associated with this ruleset + // RuleTotalCount The number of rules associated with the ruleset. RuleTotalCount int `json:"rule_total_count"` - // RulesetId Ruleset unique identifier + // RuleTypeCounts A map of rule type (for example, `pinned`) to the number of rules of that + // type. + RuleTypeCounts map[string]int `json:"rule_type_counts"` + // RulesetId A unique identifier for the ruleset. RulesetId string `json:"ruleset_id"` } @@ -80,6 +87,14 @@ func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { s.RuleTotalCount = f } + case "rule_type_counts": + if s.RuleTypeCounts == nil { + s.RuleTypeCounts = make(map[string]int, 0) + } + if err := dec.Decode(&s.RuleTypeCounts); err != nil { + return fmt.Errorf("%s | %w", "RuleTypeCounts", err) + } + case "ruleset_id": if err := dec.Decode(&s.RulesetId); err != nil { return fmt.Errorf("%s | %w", "RulesetId", err) @@ -93,8 +108,11 @@ func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { // NewQueryRulesetListItem returns a QueryRulesetListItem. 
func NewQueryRulesetListItem() *QueryRulesetListItem { r := &QueryRulesetListItem{ - RuleCriteriaTypesCounts: make(map[string]int, 0), + RuleCriteriaTypesCounts: make(map[string]int), + RuleTypeCounts: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/queryrulesetmatchedrule.go b/typedapi/types/queryrulesetmatchedrule.go index 8e22aa4658..32c8d6d875 100644 --- a/typedapi/types/queryrulesetmatchedrule.go +++ b/typedapi/types/queryrulesetmatchedrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // QueryRulesetMatchedRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/test/QueryRulesetTestResponse.ts#L30-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/query_rules/test/QueryRulesetTestResponse.ts#L30-L39 type QueryRulesetMatchedRule struct { // RuleId Rule unique identifier within that ruleset RuleId string `json:"rule_id"` @@ -74,3 +74,5 @@ func NewQueryRulesetMatchedRule() *QueryRulesetMatchedRule { return r } + +// false diff --git a/typedapi/types/querystringquery.go b/typedapi/types/querystringquery.go index 10af36164e..06ff921c0c 100644 --- a/typedapi/types/querystringquery.go +++ b/typedapi/types/querystringquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // QueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L598-L721 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L598-L721 type QueryStringQuery struct { // AllowLeadingWildcard If `true`, the wildcard characters `*` and `?` are allowed as the first // character of the query string. @@ -433,3 +433,13 @@ func NewQueryStringQuery() *QueryStringQuery { return r } + +// true + +type QueryStringQueryVariant interface { + QueryStringQueryCaster() *QueryStringQuery +} + +func (s *QueryStringQuery) QueryStringQueryCaster() *QueryStringQuery { + return s +} diff --git a/typedapi/types/queryuser.go b/typedapi/types/queryuser.go index c52bf35caa..e4ee38b661 100644 --- a/typedapi/types/queryuser.go +++ b/typedapi/types/queryuser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QueryUser type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_user/types.ts#L103-L105 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_user/types.ts#L103-L105 type QueryUser struct { Email *string `json:"email,omitempty"` Enabled bool `json:"enabled"` @@ -125,3 +125,5 @@ func NewQueryUser() *QueryUser { return r } + +// false diff --git a/typedapi/types/queryvectorbuilder.go b/typedapi/types/queryvectorbuilder.go index 676ba4d215..59b08823ed 100644 --- a/typedapi/types/queryvectorbuilder.go +++ b/typedapi/types/queryvectorbuilder.go @@ -16,20 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // QueryVectorBuilder type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Knn.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Knn.ts#L89-L92 type QueryVectorBuilder struct { - TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` + AdditionalQueryVectorBuilderProperty map[string]json.RawMessage `json:"-"` + TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s QueryVectorBuilder) MarshalJSON() ([]byte, error) { + type opt QueryVectorBuilder + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalQueryVectorBuilderProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalQueryVectorBuilderProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewQueryVectorBuilder returns a QueryVectorBuilder. func NewQueryVectorBuilder() *QueryVectorBuilder { - r := &QueryVectorBuilder{} + r := &QueryVectorBuilder{ + AdditionalQueryVectorBuilderProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type QueryVectorBuilderVariant interface { + QueryVectorBuilderCaster() *QueryVectorBuilder +} + +func (s *QueryVectorBuilder) QueryVectorBuilderCaster() *QueryVectorBuilder { + return s +} diff --git a/typedapi/types/querywatch.go b/typedapi/types/querywatch.go index a5c1e1566d..7a3bfb0027 100644 --- a/typedapi/types/querywatch.go +++ b/typedapi/types/querywatch.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QueryWatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Watch.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Watch.ts#L58-L64 type QueryWatch struct { Id_ string `json:"_id"` PrimaryTerm_ *int `json:"_primary_term,omitempty"` @@ -102,3 +102,5 @@ func NewQueryWatch() *QueryWatch { return r } + +// false diff --git a/typedapi/types/questionansweringinferenceoptions.go b/typedapi/types/questionansweringinferenceoptions.go index f14b9516b9..1b846fd16a 100644 --- a/typedapi/types/questionansweringinferenceoptions.go +++ b/typedapi/types/questionansweringinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QuestionAnsweringInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L282-L292 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L270-L280 type QuestionAnsweringInferenceOptions struct { // MaxAnswerLength The maximum answer length to consider MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -119,3 +119,13 @@ func NewQuestionAnsweringInferenceOptions() *QuestionAnsweringInferenceOptions { return r } + +// true + +type QuestionAnsweringInferenceOptionsVariant interface { + QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions +} + +func (s *QuestionAnsweringInferenceOptions) QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions { + return s +} diff --git a/typedapi/types/questionansweringinferenceupdateoptions.go b/typedapi/types/questionansweringinferenceupdateoptions.go index 578b901b6d..99a5c007c2 100644 --- a/typedapi/types/questionansweringinferenceupdateoptions.go +++ b/typedapi/types/questionansweringinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // QuestionAnsweringInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L420-L431 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L408-L419 type QuestionAnsweringInferenceUpdateOptions struct { // MaxAnswerLength The maximum answer length to consider for extraction MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -133,3 +133,13 @@ func NewQuestionAnsweringInferenceUpdateOptions() *QuestionAnsweringInferenceUpd return r } + +// true + +type QuestionAnsweringInferenceUpdateOptionsVariant interface { + QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions +} + +func (s *QuestionAnsweringInferenceUpdateOptions) QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/randomsampleraggregation.go b/typedapi/types/randomsampleraggregation.go index 1e07db6c19..1787d0fe43 100644 --- a/typedapi/types/randomsampleraggregation.go +++ b/typedapi/types/randomsampleraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RandomSamplerAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L749-L769 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L749-L769 type RandomSamplerAggregation struct { // Probability The probability that a document will be included in the aggregated data. // Must be greater than 0, less than 0.5, or exactly 1. @@ -120,3 +120,13 @@ func NewRandomSamplerAggregation() *RandomSamplerAggregation { return r } + +// true + +type RandomSamplerAggregationVariant interface { + RandomSamplerAggregationCaster() *RandomSamplerAggregation +} + +func (s *RandomSamplerAggregation) RandomSamplerAggregationCaster() *RandomSamplerAggregation { + return s +} diff --git a/typedapi/types/randomscorefunction.go b/typedapi/types/randomscorefunction.go index ad121fffaf..7c85185eaf 100644 --- a/typedapi/types/randomscorefunction.go +++ b/typedapi/types/randomscorefunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RandomScoreFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L144-L147 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L144-L147 type RandomScoreFunction struct { Field *string `json:"field,omitempty"` Seed string `json:"seed,omitempty"` @@ -80,3 +80,13 @@ func NewRandomScoreFunction() *RandomScoreFunction { return r } + +// true + +type RandomScoreFunctionVariant interface { + RandomScoreFunctionCaster() *RandomScoreFunction +} + +func (s *RandomScoreFunction) RandomScoreFunctionCaster() *RandomScoreFunction { + return s +} diff --git a/typedapi/types/rangeaggregate.go b/typedapi/types/rangeaggregate.go index 7b016d9c37..b8f44b3b27 100644 --- a/typedapi/types/rangeaggregate.go +++ b/typedapi/types/rangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L594-L598 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L594-L598 type RangeAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewRangeAggregate() *RangeAggregate { return r } + +// false diff --git a/typedapi/types/rangeaggregation.go b/typedapi/types/rangeaggregation.go index 20f5ae9ed2..c7420d20e8 100644 --- a/typedapi/types/rangeaggregation.go +++ b/typedapi/types/rangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L669-L689 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L669-L689 type RangeAggregation struct { // Field The date field whose values are used to build ranges. 
Field *string `json:"field,omitempty"` @@ -130,3 +130,13 @@ func NewRangeAggregation() *RangeAggregation { return r } + +// true + +type RangeAggregationVariant interface { + RangeAggregationCaster() *RangeAggregation +} + +func (s *RangeAggregation) RangeAggregationCaster() *RangeAggregation { + return s +} diff --git a/typedapi/types/rangebucket.go b/typedapi/types/rangebucket.go index 43888478a2..aaeed8a1c8 100644 --- a/typedapi/types/rangebucket.go +++ b/typedapi/types/rangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // RangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L600-L607 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L600-L607 type RangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -699,8 +699,10 @@ func (s RangeBucket) MarshalJSON() ([]byte, error) { // NewRangeBucket returns a RangeBucket. func NewRangeBucket() *RangeBucket { r := &RangeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/rangequery.go b/typedapi/types/rangequery.go index 957f9134dc..f9b219094b 100644 --- a/typedapi/types/rangequery.go +++ b/typedapi/types/rangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,5 +27,9 @@ package types // NumberRangeQuery // TermRangeQuery // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L176-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L176-L186 type RangeQuery any + +type RangeQueryVariant interface { + RangeQueryCaster() *RangeQuery +} diff --git a/typedapi/types/rangequerybasedatemath.go b/typedapi/types/rangequerybasedatemath.go deleted file mode 100644 index e339b35ad7..0000000000 --- a/typedapi/types/rangequerybasedatemath.go +++ /dev/null @@ -1,147 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBaseDateMath type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L122-L148 -type RangeQueryBaseDateMath struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *string `json:"from,omitempty"` - // Gt Greater than. - Gt *string `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *string `json:"gte,omitempty"` - // Lt Less than. - Lt *string `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *string `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. 
- Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *string `json:"to,omitempty"` -} - -func (s *RangeQueryBaseDateMath) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - if err := dec.Decode(&s.From); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - - case "gt": - if err := dec.Decode(&s.Gt); err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - - case "gte": - if err := dec.Decode(&s.Gte); err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - - case "lt": - if err := dec.Decode(&s.Lt); err != nil { - return fmt.Errorf("%s | %w", "Lt", err) - } - - case "lte": - if err := dec.Decode(&s.Lte); err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - - } - } - return nil -} - -// NewRangeQueryBaseDateMath returns a RangeQueryBaseDateMath. 
-func NewRangeQueryBaseDateMath() *RangeQueryBaseDateMath { - r := &RangeQueryBaseDateMath{} - - return r -} diff --git a/typedapi/types/rangequerybasedouble.go b/typedapi/types/rangequerybasedouble.go deleted file mode 100644 index d25979da34..0000000000 --- a/typedapi/types/rangequerybasedouble.go +++ /dev/null @@ -1,191 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBasedouble type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L122-L148 -type RangeQueryBasedouble struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. 
- // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *Float64 `json:"from,omitempty"` - // Gt Greater than. - Gt *Float64 `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *Float64 `json:"gte,omitempty"` - // Lt Less than. - Lt *Float64 `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *Float64 `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. - Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *Float64 `json:"to,omitempty"` -} - -func (s *RangeQueryBasedouble) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - if err := dec.Decode(&s.From); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - - case "gt": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - f := Float64(value) - s.Gt = &f - case float64: - f := Float64(v) - s.Gt = &f - } - - case "gte": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - f := Float64(value) - s.Gte = &f - case float64: - f := Float64(v) - s.Gte = &f - } - - case "lt": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - 
return fmt.Errorf("%s | %w", "Lt", err) - } - f := Float64(value) - s.Lt = &f - case float64: - f := Float64(v) - s.Lt = &f - } - - case "lte": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - f := Float64(value) - s.Lte = &f - case float64: - f := Float64(v) - s.Lte = &f - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - - } - } - return nil -} - -// NewRangeQueryBasedouble returns a RangeQueryBasedouble. -func NewRangeQueryBasedouble() *RangeQueryBasedouble { - r := &RangeQueryBasedouble{} - - return r -} diff --git a/typedapi/types/rangequerybasestring.go b/typedapi/types/rangequerybasestring.go deleted file mode 100644 index 672542fcec..0000000000 --- a/typedapi/types/rangequerybasestring.go +++ /dev/null @@ -1,189 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBasestring type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L122-L148 -type RangeQueryBasestring struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *string `json:"from,omitempty"` - // Gt Greater than. - Gt *string `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *string `json:"gte,omitempty"` - // Lt Less than. - Lt *string `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *string `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. 
- Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *string `json:"to,omitempty"` -} - -func (s *RangeQueryBasestring) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.From = &o - - case "gt": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Gt = &o - - case "gte": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Gte = &o - - case "lt": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Lt", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Lt = &o - - case "lte": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Lte = &o - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := 
string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.To = &o - - } - } - return nil -} - -// NewRangeQueryBasestring returns a RangeQueryBasestring. -func NewRangeQueryBasestring() *RangeQueryBasestring { - r := &RangeQueryBasestring{} - - return r -} diff --git a/typedapi/types/rankcontainer.go b/typedapi/types/rankcontainer.go index 56ea3ead91..ca4766c08c 100644 --- a/typedapi/types/rankcontainer.go +++ b/typedapi/types/rankcontainer.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // RankContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Rank.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Rank.ts#L22-L28 type RankContainer struct { + AdditionalRankContainerProperty map[string]json.RawMessage `json:"-"` // Rrf The reciprocal rank fusion parameters Rrf *RrfRank `json:"rrf,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RankContainer) MarshalJSON() ([]byte, error) { + type opt RankContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRankContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRankContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRankContainer returns a RankContainer. func NewRankContainer() *RankContainer { - r := &RankContainer{} + r := &RankContainer{ + AdditionalRankContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RankContainerVariant interface { + RankContainerCaster() *RankContainer +} + +func (s *RankContainer) RankContainerCaster() *RankContainer { + return s +} diff --git a/typedapi/types/rankeddocument.go b/typedapi/types/rankeddocument.go index 14a82f20f0..ef91668860 100644 --- a/typedapi/types/rankeddocument.go +++ b/typedapi/types/rankeddocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,11 @@ import ( // RankedDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L67-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L91-L101 type RankedDocument struct { - Index int `json:"index"` - Score float32 `json:"score"` - Text *string `json:"text,omitempty"` + Index int `json:"index"` + RelevanceScore float32 `json:"relevance_score"` + Text *string `json:"text,omitempty"` } func (s *RankedDocument) UnmarshalJSON(data []byte) error { @@ -69,20 +69,20 @@ func (s *RankedDocument) UnmarshalJSON(data []byte) error { s.Index = f } - case "score": + case "relevance_score": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseFloat(v, 32) if err != nil { - return fmt.Errorf("%s | %w", "Score", err) + return fmt.Errorf("%s | %w", "RelevanceScore", err) } f := float32(value) - s.Score = f + s.RelevanceScore = f case float64: f := float32(v) - s.Score = f + s.RelevanceScore = f } case "text": @@ -108,3 +108,5 @@ func NewRankedDocument() *RankedDocument { return r } + +// false diff --git a/typedapi/types/rankevalhit.go b/typedapi/types/rankevalhit.go index 372f186db9..d52e11d0d1 100644 --- a/typedapi/types/rankevalhit.go +++ b/typedapi/types/rankevalhit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalHit type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L144-L148 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L144-L148 type RankEvalHit struct { Id_ string `json:"_id"` Index_ string `json:"_index"` @@ -90,3 +90,5 @@ func NewRankEvalHit() *RankEvalHit { return r } + +// false diff --git a/typedapi/types/rankevalhititem.go b/typedapi/types/rankevalhititem.go index 76160a258a..c65a1e3fbc 100644 --- a/typedapi/types/rankevalhititem.go +++ b/typedapi/types/rankevalhititem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RankEvalHitItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L139-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L139-L142 type RankEvalHitItem struct { Hit RankEvalHit `json:"hit"` Rating *Float64 `json:"rating,omitempty"` @@ -72,3 +72,5 @@ func NewRankEvalHitItem() *RankEvalHitItem { return r } + +// false diff --git a/typedapi/types/rankevalmetric.go b/typedapi/types/rankevalmetric.go index c3ca65b5e0..dba5e1e9f9 100644 --- a/typedapi/types/rankevalmetric.go +++ b/typedapi/types/rankevalmetric.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RankEvalMetric type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L90-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L90-L96 type RankEvalMetric struct { Dcg *RankEvalMetricDiscountedCumulativeGain `json:"dcg,omitempty"` ExpectedReciprocalRank *RankEvalMetricExpectedReciprocalRank `json:"expected_reciprocal_rank,omitempty"` @@ -37,3 +37,13 @@ func NewRankEvalMetric() *RankEvalMetric { return r } + +// true + +type RankEvalMetricVariant interface { + RankEvalMetricCaster() *RankEvalMetric +} + +func (s *RankEvalMetric) RankEvalMetricCaster() *RankEvalMetric { + return s +} diff --git a/typedapi/types/rankevalmetricdetail.go b/typedapi/types/rankevalmetricdetail.go index 0d4c66c322..a4f8f585b1 100644 --- a/typedapi/types/rankevalmetricdetail.go +++ b/typedapi/types/rankevalmetricdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L128-L137 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L128-L137 type RankEvalMetricDetail struct { // Hits The hits section shows a grouping of the search results with their supplied // ratings @@ -106,8 +106,10 @@ func (s *RankEvalMetricDetail) UnmarshalJSON(data []byte) error { // NewRankEvalMetricDetail returns a RankEvalMetricDetail. 
func NewRankEvalMetricDetail() *RankEvalMetricDetail { r := &RankEvalMetricDetail{ - MetricDetails: make(map[string]map[string]json.RawMessage, 0), + MetricDetails: make(map[string]map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/rankevalmetricdiscountedcumulativegain.go b/typedapi/types/rankevalmetricdiscountedcumulativegain.go index 48c4c1d7e7..3eb88e0e62 100644 --- a/typedapi/types/rankevalmetricdiscountedcumulativegain.go +++ b/typedapi/types/rankevalmetricdiscountedcumulativegain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricDiscountedCumulativeGain type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L66-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L66-L77 type RankEvalMetricDiscountedCumulativeGain struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -96,3 +96,13 @@ func NewRankEvalMetricDiscountedCumulativeGain() *RankEvalMetricDiscountedCumula return r } + +// true + +type RankEvalMetricDiscountedCumulativeGainVariant interface { + RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain +} + +func (s *RankEvalMetricDiscountedCumulativeGain) RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain { + return s +} diff --git a/typedapi/types/rankevalmetricexpectedreciprocalrank.go b/typedapi/types/rankevalmetricexpectedreciprocalrank.go index 77edd284fa..0e159f7dc4 100644 --- a/typedapi/types/rankevalmetricexpectedreciprocalrank.go +++ b/typedapi/types/rankevalmetricexpectedreciprocalrank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricExpectedReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L79-L88 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L79-L88 type RankEvalMetricExpectedReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -98,3 +98,13 @@ func NewRankEvalMetricExpectedReciprocalRank() *RankEvalMetricExpectedReciprocal return r } + +// true + +type RankEvalMetricExpectedReciprocalRankVariant interface { + RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank +} + +func (s *RankEvalMetricExpectedReciprocalRank) RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank { + return s +} diff --git a/typedapi/types/rankevalmetricmeanreciprocalrank.go b/typedapi/types/rankevalmetricmeanreciprocalrank.go index cae15280bc..394c4ccb35 100644 --- a/typedapi/types/rankevalmetricmeanreciprocalrank.go +++ b/typedapi/types/rankevalmetricmeanreciprocalrank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricMeanReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L60-L64 type RankEvalMetricMeanReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -99,3 +99,13 @@ func NewRankEvalMetricMeanReciprocalRank() *RankEvalMetricMeanReciprocalRank { return r } + +// true + +type RankEvalMetricMeanReciprocalRankVariant interface { + RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank +} + +func (s *RankEvalMetricMeanReciprocalRank) RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank { + return s +} diff --git a/typedapi/types/rankevalmetricprecision.go b/typedapi/types/rankevalmetricprecision.go index 7c7400e675..cbd1bdb29b 100644 --- a/typedapi/types/rankevalmetricprecision.go +++ b/typedapi/types/rankevalmetricprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricPrecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L42-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L42-L52 type RankEvalMetricPrecision struct { // IgnoreUnlabeled Controls how unlabeled documents in the search results are counted. 
If set to // true, unlabeled documents are ignored and neither count as relevant or @@ -117,3 +117,13 @@ func NewRankEvalMetricPrecision() *RankEvalMetricPrecision { return r } + +// true + +type RankEvalMetricPrecisionVariant interface { + RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision +} + +func (s *RankEvalMetricPrecision) RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision { + return s +} diff --git a/typedapi/types/rankevalmetricrecall.go b/typedapi/types/rankevalmetricrecall.go index 770572958a..3344112c88 100644 --- a/typedapi/types/rankevalmetricrecall.go +++ b/typedapi/types/rankevalmetricrecall.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricRecall type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L54-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L54-L58 type RankEvalMetricRecall struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -99,3 +99,13 @@ func NewRankEvalMetricRecall() *RankEvalMetricRecall { return r } + +// true + +type RankEvalMetricRecallVariant interface { + RankEvalMetricRecallCaster() *RankEvalMetricRecall +} + +func (s *RankEvalMetricRecall) RankEvalMetricRecallCaster() *RankEvalMetricRecall { + return s +} diff --git a/typedapi/types/rankevalquery.go b/typedapi/types/rankevalquery.go index 8e17c7c0a6..300578d21e 100644 --- a/typedapi/types/rankevalquery.go +++ b/typedapi/types/rankevalquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankEvalQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L111-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L111-L117 type RankEvalQuery struct { Query Query `json:"query"` Size *int `json:"size,omitempty"` @@ -89,3 +89,13 @@ func NewRankEvalQuery() *RankEvalQuery { return r } + +// true + +type RankEvalQueryVariant interface { + RankEvalQueryCaster() *RankEvalQuery +} + +func (s *RankEvalQuery) RankEvalQueryCaster() *RankEvalQuery { + return s +} diff --git a/typedapi/types/rankevalrequestitem.go b/typedapi/types/rankevalrequestitem.go index abfb102c52..385e27fa8e 100644 --- a/typedapi/types/rankevalrequestitem.go +++ b/typedapi/types/rankevalrequestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RankEvalRequestItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L98-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L98-L109 type RankEvalRequestItem struct { // Id The search request’s ID, used to group result details later. Id string `json:"id"` @@ -95,8 +95,18 @@ func (s *RankEvalRequestItem) UnmarshalJSON(data []byte) error { // NewRankEvalRequestItem returns a RankEvalRequestItem. func NewRankEvalRequestItem() *RankEvalRequestItem { r := &RankEvalRequestItem{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type RankEvalRequestItemVariant interface { + RankEvalRequestItemCaster() *RankEvalRequestItem +} + +func (s *RankEvalRequestItem) RankEvalRequestItemCaster() *RankEvalRequestItem { + return s +} diff --git a/typedapi/types/rankfeaturefunctionlinear.go b/typedapi/types/rankfeaturefunctionlinear.go index a22250df41..339bc89f48 100644 --- a/typedapi/types/rankfeaturefunctionlinear.go +++ b/typedapi/types/rankfeaturefunctionlinear.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RankFeatureFunctionLinear type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L282-L282 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L282-L282 type RankFeatureFunctionLinear struct { } @@ -32,3 +32,13 @@ func NewRankFeatureFunctionLinear() *RankFeatureFunctionLinear { return r } + +// true + +type RankFeatureFunctionLinearVariant interface { + RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear +} + +func (s *RankFeatureFunctionLinear) RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear { + return s +} diff --git a/typedapi/types/rankfeaturefunctionlogarithm.go b/typedapi/types/rankfeaturefunctionlogarithm.go index bfa2371525..e6ac87ec52 100644 --- a/typedapi/types/rankfeaturefunctionlogarithm.go +++ b/typedapi/types/rankfeaturefunctionlogarithm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionLogarithm type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L284-L289 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L284-L289 type RankFeatureFunctionLogarithm struct { // ScalingFactor Configurable scaling factor. 
ScalingFactor float32 `json:"scaling_factor"` @@ -79,3 +79,13 @@ func NewRankFeatureFunctionLogarithm() *RankFeatureFunctionLogarithm { return r } + +// true + +type RankFeatureFunctionLogarithmVariant interface { + RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm +} + +func (s *RankFeatureFunctionLogarithm) RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm { + return s +} diff --git a/typedapi/types/rankfeaturefunctionsaturation.go b/typedapi/types/rankfeaturefunctionsaturation.go index 068eb34c04..163398c1a9 100644 --- a/typedapi/types/rankfeaturefunctionsaturation.go +++ b/typedapi/types/rankfeaturefunctionsaturation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionSaturation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L291-L296 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L291-L296 type RankFeatureFunctionSaturation struct { // Pivot Configurable pivot value so that the result will be less than 0.5. 
Pivot *float32 `json:"pivot,omitempty"` @@ -79,3 +79,13 @@ func NewRankFeatureFunctionSaturation() *RankFeatureFunctionSaturation { return r } + +// true + +type RankFeatureFunctionSaturationVariant interface { + RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation +} + +func (s *RankFeatureFunctionSaturation) RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation { + return s +} diff --git a/typedapi/types/rankfeaturefunctionsigmoid.go b/typedapi/types/rankfeaturefunctionsigmoid.go index 253b56a585..84f3807c9c 100644 --- a/typedapi/types/rankfeaturefunctionsigmoid.go +++ b/typedapi/types/rankfeaturefunctionsigmoid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionSigmoid type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L298-L307 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L298-L307 type RankFeatureFunctionSigmoid struct { // Exponent Configurable Exponent. 
Exponent float32 `json:"exponent"` @@ -97,3 +97,13 @@ func NewRankFeatureFunctionSigmoid() *RankFeatureFunctionSigmoid { return r } + +// true + +type RankFeatureFunctionSigmoidVariant interface { + RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid +} + +func (s *RankFeatureFunctionSigmoid) RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid { + return s +} diff --git a/typedapi/types/rankfeatureproperty.go b/typedapi/types/rankfeatureproperty.go index ddbab2dd86..79c9193678 100644 --- a/typedapi/types/rankfeatureproperty.go +++ b/typedapi/types/rankfeatureproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // RankFeatureProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L196-L199 type RankFeatureProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -761,6 +792,7 @@ func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, PositiveScoreImpact: s.PositiveScoreImpact, Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -772,10 +804,20 @@ func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { // NewRankFeatureProperty returns a RankFeatureProperty. 
func NewRankFeatureProperty() *RankFeatureProperty { r := &RankFeatureProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type RankFeaturePropertyVariant interface { + RankFeaturePropertyCaster() *RankFeatureProperty +} + +func (s *RankFeatureProperty) RankFeaturePropertyCaster() *RankFeatureProperty { + return s +} diff --git a/typedapi/types/rankfeaturequery.go b/typedapi/types/rankfeaturequery.go index 34996d5d47..f835409d35 100644 --- a/typedapi/types/rankfeaturequery.go +++ b/typedapi/types/rankfeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RankFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L309-L335 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L309-L335 type RankFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -135,3 +135,13 @@ func NewRankFeatureQuery() *RankFeatureQuery { return r } + +// true + +type RankFeatureQueryVariant interface { + RankFeatureQueryCaster() *RankFeatureQuery +} + +func (s *RankFeatureQuery) RankFeatureQueryCaster() *RankFeatureQuery { + return s +} diff --git a/typedapi/types/rankfeaturesproperty.go b/typedapi/types/rankfeaturesproperty.go index 8f70616ab3..ac2ab07673 100644 --- a/typedapi/types/rankfeaturesproperty.go +++ b/typedapi/types/rankfeaturesproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // RankFeaturesProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L197-L200 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L201-L204 type RankFeaturesProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -761,6 +792,7 @@ func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, PositiveScoreImpact: s.PositiveScoreImpact, Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -772,10 +804,20 @@ func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { // NewRankFeaturesProperty returns a RankFeaturesProperty. 
func NewRankFeaturesProperty() *RankFeaturesProperty { r := &RankFeaturesProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type RankFeaturesPropertyVariant interface { + RankFeaturesPropertyCaster() *RankFeaturesProperty +} + +func (s *RankFeaturesProperty) RankFeaturesPropertyCaster() *RankFeaturesProperty { + return s +} diff --git a/typedapi/types/raretermsaggregation.go b/typedapi/types/raretermsaggregation.go index 753a836515..320192c88b 100644 --- a/typedapi/types/raretermsaggregation.go +++ b/typedapi/types/raretermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RareTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L706-L739 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L706-L739 type RareTermsAggregation struct { // Exclude Terms that should be excluded from the aggregation. 
Exclude []string `json:"exclude,omitempty"` @@ -181,3 +181,13 @@ func NewRareTermsAggregation() *RareTermsAggregation { return r } + +// true + +type RareTermsAggregationVariant interface { + RareTermsAggregationCaster() *RareTermsAggregation +} + +func (s *RareTermsAggregation) RareTermsAggregationCaster() *RareTermsAggregation { + return s +} diff --git a/typedapi/types/rateaggregate.go b/typedapi/types/rateaggregate.go index 9235752802..71f90eede1 100644 --- a/typedapi/types/rateaggregate.go +++ b/typedapi/types/rateaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RateAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L847-L854 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L847-L854 type RateAggregate struct { Meta Metadata `json:"meta,omitempty"` Value Float64 `json:"value"` @@ -97,3 +97,5 @@ func NewRateAggregate() *RateAggregate { return r } + +// false diff --git a/typedapi/types/rateaggregation.go b/typedapi/types/rateaggregation.go index 818f6b7779..20bdbd84bc 100644 --- a/typedapi/types/rateaggregation.go +++ b/typedapi/types/rateaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // RateAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L239-L250 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L239-L250 type RateAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -113,3 +113,13 @@ func NewRateAggregation() *RateAggregation { return r } + +// true + +type RateAggregationVariant interface { + RateAggregationCaster() *RateAggregation +} + +func (s *RateAggregation) RateAggregationCaster() *RateAggregation { + return s +} diff --git a/typedapi/types/deleteinferenceendpointresult.go b/typedapi/types/ratelimitsetting.go similarity index 53% rename from typedapi/types/deleteinferenceendpointresult.go rename to typedapi/types/ratelimitsetting.go index 0bb9907e3e..82237499b7 100644 --- a/typedapi/types/deleteinferenceendpointresult.go +++ b/typedapi/types/ratelimitsetting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,17 +29,15 @@ import ( "strconv" ) -// DeleteInferenceEndpointResult type. +// RateLimitSetting type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L91-L96 -type DeleteInferenceEndpointResult struct { - // Acknowledged For a successful response, this value is always true. On failure, an - // exception is returned instead. - Acknowledged bool `json:"acknowledged"` - Pipelines []string `json:"pipelines"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Services.ts#L95-L100 +type RateLimitSetting struct { + // RequestsPerMinute The number of requests allowed per minute. + RequestsPerMinute *int `json:"requests_per_minute,omitempty"` } -func (s *DeleteInferenceEndpointResult) UnmarshalJSON(data []byte) error { +func (s *RateLimitSetting) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -54,23 +52,20 @@ func (s *DeleteInferenceEndpointResult) UnmarshalJSON(data []byte) error { switch t { - case "acknowledged": + case "requests_per_minute": + var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.ParseBool(v) + value, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("%s | %w", "Acknowledged", err) + return fmt.Errorf("%s | %w", "RequestsPerMinute", err) } - s.Acknowledged = value - case bool: - s.Acknowledged = v - } - - case "pipelines": - if err := dec.Decode(&s.Pipelines); err != nil { - return fmt.Errorf("%s | %w", "Pipelines", err) + s.RequestsPerMinute = &value + case float64: + f := int(v) + s.RequestsPerMinute = &f } } @@ -78,9 +73,19 @@ func (s *DeleteInferenceEndpointResult) UnmarshalJSON(data []byte) error { return nil } -// NewDeleteInferenceEndpointResult returns a DeleteInferenceEndpointResult. -func NewDeleteInferenceEndpointResult() *DeleteInferenceEndpointResult { - r := &DeleteInferenceEndpointResult{} +// NewRateLimitSetting returns a RateLimitSetting. 
+func NewRateLimitSetting() *RateLimitSetting { + r := &RateLimitSetting{} return r } + +// true + +type RateLimitSettingVariant interface { + RateLimitSettingCaster() *RateLimitSetting +} + +func (s *RateLimitSetting) RateLimitSettingCaster() *RateLimitSetting { + return s +} diff --git a/typedapi/types/readblobdetails.go b/typedapi/types/readblobdetails.go new file mode 100644 index 0000000000..b60ba4090e --- /dev/null +++ b/typedapi/types/readblobdetails.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadBlobDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L204-L248 +type ReadBlobDetails struct { + // BeforeWriteComplete Indicates whether the read operation may have started before the write + // operation was complete. 
+ BeforeWriteComplete *bool `json:"before_write_complete,omitempty"` + // Elapsed The length of time spent reading the blob. + // If the blob was not found, this detail is omitted. + Elapsed Duration `json:"elapsed,omitempty"` + // ElapsedNanos The length of time spent reading the blob, in nanoseconds. + // If the blob was not found, this detail is omitted. + ElapsedNanos *int64 `json:"elapsed_nanos,omitempty"` + // FirstByteTime The length of time waiting for the first byte of the read operation to be + // received. + // If the blob was not found, this detail is omitted. + FirstByteTime Duration `json:"first_byte_time,omitempty"` + // FirstByteTimeNanos The length of time waiting for the first byte of the read operation to be + // received, in nanoseconds. + // If the blob was not found, this detail is omitted. + FirstByteTimeNanos int64 `json:"first_byte_time_nanos"` + // Found Indicates whether the blob was found by the read operation. + // If the read was started before the write completed or the write was ended + // before completion, it might be false. + Found bool `json:"found"` + // Node The node that performed the read operation. + Node SnapshotNodeInfo `json:"node"` + // Throttled The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + // If the blob was not found, this detail is omitted. + Throttled Duration `json:"throttled,omitempty"` + // ThrottledNanos The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, + // in nanoseconds. + // If the blob was not found, this detail is omitted. 
+ ThrottledNanos *int64 `json:"throttled_nanos,omitempty"` +} + +func (s *ReadBlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "before_write_complete": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BeforeWriteComplete", err) + } + s.BeforeWriteComplete = &value + case bool: + s.BeforeWriteComplete = &v + } + + case "elapsed": + if err := dec.Decode(&s.Elapsed); err != nil { + return fmt.Errorf("%s | %w", "Elapsed", err) + } + + case "elapsed_nanos": + if err := dec.Decode(&s.ElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "ElapsedNanos", err) + } + + case "first_byte_time": + if err := dec.Decode(&s.FirstByteTime); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTime", err) + } + + case "first_byte_time_nanos": + if err := dec.Decode(&s.FirstByteTimeNanos); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTimeNanos", err) + } + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "throttled": + if err := dec.Decode(&s.Throttled); err != nil { + return fmt.Errorf("%s | %w", "Throttled", err) + } + + case "throttled_nanos": + if err := dec.Decode(&s.ThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "ThrottledNanos", err) + } + + } + } + return nil +} + +// NewReadBlobDetails returns a ReadBlobDetails. 
+func NewReadBlobDetails() *ReadBlobDetails { + r := &ReadBlobDetails{} + + return r +} + +// false diff --git a/typedapi/types/readexception.go b/typedapi/types/readexception.go index c1918ef79c..939dcf0202 100644 --- a/typedapi/types/readexception.go +++ b/typedapi/types/readexception.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,14 @@ import ( // ReadException type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ccr/_types/FollowIndexStats.ts#L71-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ccr/_types/FollowIndexStats.ts#L111-L118 type ReadException struct { + // Exception The exception that caused the read to fail. Exception ErrorCause `json:"exception"` - FromSeqNo int64 `json:"from_seq_no"` - Retries int `json:"retries"` + // FromSeqNo The starting sequence number of the batch requested from the leader. + FromSeqNo int64 `json:"from_seq_no"` + // Retries The number of times the batch has been retried. + Retries int `json:"retries"` } func (s *ReadException) UnmarshalJSON(data []byte) error { @@ -90,3 +93,5 @@ func NewReadException() *ReadException { return r } + +// false diff --git a/typedapi/types/readonlyurlrepository.go b/typedapi/types/readonlyurlrepository.go index b5f4f72055..d99a0f9095 100644 --- a/typedapi/types/readonlyurlrepository.go +++ b/typedapi/types/readonlyurlrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,13 @@ import ( // ReadOnlyUrlRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L60-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L92-L102 type ReadOnlyUrlRepository struct { + // Settings The repository settings. Settings ReadOnlyUrlRepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Type The read-only URL repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *ReadOnlyUrlRepository) UnmarshalJSON(data []byte) error { @@ -92,3 +94,13 @@ func NewReadOnlyUrlRepository() *ReadOnlyUrlRepository { return r } + +// true + +type ReadOnlyUrlRepositoryVariant interface { + ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository +} + +func (s *ReadOnlyUrlRepository) ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository { + return s +} diff --git a/typedapi/types/readonlyurlrepositorysettings.go b/typedapi/types/readonlyurlrepositorysettings.go index 1837cdfb8d..76ac23329a 100644 --- a/typedapi/types/readonlyurlrepositorysettings.go +++ b/typedapi/types/readonlyurlrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,16 +31,60 @@ import ( // ReadOnlyUrlRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L110-L115 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L377-L412 type ReadOnlyUrlRepositorySettings struct { - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Compress *bool `json:"compress,omitempty"` - HttpMaxRetries *int `json:"http_max_retries,omitempty"` - HttpSocketTimeout Duration `json:"http_socket_timeout,omitempty"` - MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // HttpMaxRetries The maximum number of retries for HTTP and HTTPS URLs. + HttpMaxRetries *int `json:"http_max_retries,omitempty"` + // HttpSocketTimeout The maximum wait time for data transfers over a connection. 
+ HttpSocketTimeout Duration `json:"http_socket_timeout,omitempty"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. + // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - Url string `json:"url"` + // Url The URL location of the root of the shared filesystem repository. + // The following protocols are supported: + // + // * `file` + // * `ftp` + // * `http` + // * `https` + // * `jar` + // + // URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with + // the `repositories.url.allowed_urls` cluster setting. + // This setting supports wildcards in the place of a host, path, query, or + // fragment in the URL. + // + // URLs using the file protocol must point to the location of a shared + // filesystem accessible to all master and data nodes in the cluster. + // This location must be registered in the `path.repo` setting. + // You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols + // in the `path.repo` setting. 
+ Url string `json:"url"` } func (s *ReadOnlyUrlRepositorySettings) UnmarshalJSON(data []byte) error { @@ -147,3 +191,13 @@ func NewReadOnlyUrlRepositorySettings() *ReadOnlyUrlRepositorySettings { return r } + +// true + +type ReadOnlyUrlRepositorySettingsVariant interface { + ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings +} + +func (s *ReadOnlyUrlRepositorySettings) ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings { + return s +} diff --git a/typedapi/types/readsummaryinfo.go b/typedapi/types/readsummaryinfo.go new file mode 100644 index 0000000000..a097e86339 --- /dev/null +++ b/typedapi/types/readsummaryinfo.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadSummaryInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L115-L160 +type ReadSummaryInfo struct { + // Count The number of read operations performed in the test. + Count int `json:"count"` + // MaxWait The maximum time spent waiting for the first byte of any read request to be + // received. + MaxWait Duration `json:"max_wait"` + // MaxWaitNanos The maximum time spent waiting for the first byte of any read request to be + // received, in nanoseconds. + MaxWaitNanos int64 `json:"max_wait_nanos"` + // TotalElapsed The total elapsed time spent on reading blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on reading blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs or partial blobs read in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs or partial blobs read in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. + TotalThrottledNanos int64 `json:"total_throttled_nanos"` + // TotalWait The total time spent waiting for the first byte of each read request to be + // received. + TotalWait Duration `json:"total_wait"` + // TotalWaitNanos The total time spent waiting for the first byte of each read request to be + // received, in nanoseconds. 
+ TotalWaitNanos int64 `json:"total_wait_nanos"` +} + +func (s *ReadSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "max_wait": + if err := dec.Decode(&s.MaxWait); err != nil { + return fmt.Errorf("%s | %w", "MaxWait", err) + } + + case "max_wait_nanos": + if err := dec.Decode(&s.MaxWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "MaxWaitNanos", err) + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + if err := dec.Decode(&s.TotalThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + } + + case "total_wait": + if err := dec.Decode(&s.TotalWait); err != nil { + return fmt.Errorf("%s | %w", "TotalWait", err) + } + + case "total_wait_nanos": + if err 
:= dec.Decode(&s.TotalWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalWaitNanos", err) + } + + } + } + return nil +} + +// NewReadSummaryInfo returns a ReadSummaryInfo. +func NewReadSummaryInfo() *ReadSummaryInfo { + r := &ReadSummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/realmcache.go b/typedapi/types/realmcache.go index b322112f7a..277022fa29 100644 --- a/typedapi/types/realmcache.go +++ b/typedapi/types/realmcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RealmCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L264-L266 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L276-L278 type RealmCache struct { Size int64 `json:"size"` } @@ -77,3 +77,5 @@ func NewRealmCache() *RealmCache { return r } + +// false diff --git a/typedapi/types/realminfo.go b/typedapi/types/realminfo.go index 0f8231ad77..721fd3a069 100644 --- a/typedapi/types/realminfo.go +++ b/typedapi/types/realminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RealmInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RealmInfo.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RealmInfo.ts#L22-L25 type RealmInfo struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewRealmInfo() *RealmInfo { return r } + +// false diff --git a/typedapi/types/recording.go b/typedapi/types/recording.go index 24fc596cd2..c4ab396f4a 100644 --- a/typedapi/types/recording.go +++ b/typedapi/types/recording.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Recording type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L225-L230 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L225-L230 type Recording struct { CumulativeExecutionCount *int64 `json:"cumulative_execution_count,omitempty"` CumulativeExecutionTime Duration `json:"cumulative_execution_time,omitempty"` @@ -102,3 +102,5 @@ func NewRecording() *Recording { return r } + +// false diff --git a/typedapi/types/recoverybytes.go b/typedapi/types/recoverybytes.go index 3bfb4cee96..e3e530c431 100644 --- a/typedapi/types/recoverybytes.go +++ b/typedapi/types/recoverybytes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RecoveryBytes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L38-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L38-L48 type RecoveryBytes struct { Percent Percentage `json:"percent"` Recovered ByteSize `json:"recovered,omitempty"` @@ -114,3 +114,5 @@ func NewRecoveryBytes() *RecoveryBytes { return r } + +// false diff --git a/typedapi/types/recoveryfiles.go b/typedapi/types/recoveryfiles.go index 5d376ac428..4c6ab83eda 100644 --- a/typedapi/types/recoveryfiles.go +++ b/typedapi/types/recoveryfiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RecoveryFiles type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L56-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L56-L62 type RecoveryFiles struct { Details []FileDetails `json:"details,omitempty"` Percent Percentage `json:"percent"` @@ -121,3 +121,5 @@ func NewRecoveryFiles() *RecoveryFiles { return r } + +// false diff --git a/typedapi/types/recoveryindexstatus.go b/typedapi/types/recoveryindexstatus.go index 6897abf3ee..e47eb72789 100644 --- a/typedapi/types/recoveryindexstatus.go +++ b/typedapi/types/recoveryindexstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RecoveryIndexStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L64-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L64-L74 type RecoveryIndexStatus struct { Bytes *RecoveryBytes `json:"bytes,omitempty"` Files RecoveryFiles `json:"files"` @@ -114,3 +114,5 @@ func NewRecoveryIndexStatus() *RecoveryIndexStatus { return r } + +// false diff --git a/typedapi/types/recoveryorigin.go b/typedapi/types/recoveryorigin.go index 0699b30b22..4aa03b583f 100644 --- a/typedapi/types/recoveryorigin.go +++ b/typedapi/types/recoveryorigin.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RecoveryOrigin type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L76-L89 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L76-L89 type RecoveryOrigin struct { BootstrapNewHistoryUuid *bool `json:"bootstrap_new_history_uuid,omitempty"` Host *string `json:"host,omitempty"` @@ -149,3 +149,5 @@ func NewRecoveryOrigin() *RecoveryOrigin { return r } + +// false diff --git a/typedapi/types/recoveryrecord.go b/typedapi/types/recoveryrecord.go index cdaad57bc5..d744f87bcc 100644 --- a/typedapi/types/recoveryrecord.go +++ b/typedapi/types/recoveryrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RecoveryRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/recovery/types.ts#L24-L155 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/recovery/types.ts#L24-L155 type RecoveryRecord struct { // Bytes The number of bytes to recover. 
Bytes *string `json:"bytes,omitempty"` @@ -362,3 +362,5 @@ func NewRecoveryRecord() *RecoveryRecord { return r } + +// false diff --git a/typedapi/types/recoverystartstatus.go b/typedapi/types/recoverystartstatus.go index eae05f5207..8117fa43e9 100644 --- a/typedapi/types/recoverystartstatus.go +++ b/typedapi/types/recoverystartstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RecoveryStartStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L91-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L91-L96 type RecoveryStartStatus struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -84,3 +84,5 @@ func NewRecoveryStartStatus() *RecoveryStartStatus { return r } + +// false diff --git a/typedapi/types/recoverystats.go b/typedapi/types/recoverystats.go index 3c954feb7e..35d9048c07 100644 --- a/typedapi/types/recoverystats.go +++ b/typedapi/types/recoverystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RecoveryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L228-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L231-L236 type RecoveryStats struct { CurrentAsSource int64 `json:"current_as_source"` CurrentAsTarget int64 `json:"current_as_target"` @@ -105,3 +105,5 @@ func NewRecoveryStats() *RecoveryStats { return r } + +// false diff --git a/typedapi/types/recoverystatus.go b/typedapi/types/recoverystatus.go index d56145279c..3bd66440d2 100644 --- a/typedapi/types/recoverystatus.go +++ b/typedapi/types/recoverystatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RecoveryStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L98-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L98-L100 type RecoveryStatus struct { Shards []ShardRecovery `json:"shards"` } @@ -33,3 +33,5 @@ func NewRecoveryStatus() *RecoveryStatus { return r } + +// false diff --git a/typedapi/types/redact.go b/typedapi/types/redact.go index 07a4ad278f..7d69606eb6 100644 --- a/typedapi/types/redact.go +++ b/typedapi/types/redact.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Redact type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L39-L44 type Redact struct { // IsRedacted_ indicates if document has been redacted IsRedacted_ bool `json:"_is_redacted"` @@ -77,3 +77,5 @@ func NewRedact() *Redact { return r } + +// false diff --git a/typedapi/types/redactprocessor.go b/typedapi/types/redactprocessor.go index 6682c70448..f629fb1c88 100644 --- a/typedapi/types/redactprocessor.go +++ b/typedapi/types/redactprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RedactProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1239-L1280 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1280-L1321 type RedactProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type RedactProcessor struct { // Field The field to be redacted Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -98,16 +98,9 @@ func (s *RedactProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -227,8 +220,18 @@ func (s *RedactProcessor) UnmarshalJSON(data []byte) error { // NewRedactProcessor returns a RedactProcessor. func NewRedactProcessor() *RedactProcessor { r := &RedactProcessor{ - PatternDefinitions: make(map[string]string, 0), + PatternDefinitions: make(map[string]string), } return r } + +// true + +type RedactProcessorVariant interface { + RedactProcessorCaster() *RedactProcessor +} + +func (s *RedactProcessor) RedactProcessorCaster() *RedactProcessor { + return s +} diff --git a/typedapi/types/refreshstats.go b/typedapi/types/refreshstats.go index 154a483ae6..4b8c458992 100644 --- a/typedapi/types/refreshstats.go +++ b/typedapi/types/refreshstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RefreshStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L235-L242 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L238-L245 type RefreshStats struct { ExternalTotal int64 `json:"external_total"` ExternalTotalTimeInMillis int64 `json:"external_total_time_in_millis"` @@ -127,3 +127,5 @@ func NewRefreshStats() *RefreshStats { return r } + +// false diff --git a/typedapi/types/regexoptions.go b/typedapi/types/regexoptions.go index 97d9670eda..162528c3ed 100644 --- a/typedapi/types/regexoptions.go +++ b/typedapi/types/regexoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RegexOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L183-L194 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L183-L194 type RegexOptions struct { // Flags Optional operators for the regular expression. Flags string `json:"flags,omitempty"` @@ -93,3 +93,13 @@ func NewRegexOptions() *RegexOptions { return r } + +// true + +type RegexOptionsVariant interface { + RegexOptionsCaster() *RegexOptions +} + +func (s *RegexOptions) RegexOptionsCaster() *RegexOptions { + return s +} diff --git a/typedapi/types/regexpquery.go b/typedapi/types/regexpquery.go index 281dc40e71..e38b69a5ea 100644 --- a/typedapi/types/regexpquery.go +++ b/typedapi/types/regexpquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RegexpQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L203-L236 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L203-L236 type RegexpQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -180,3 +180,13 @@ func NewRegexpQuery() *RegexpQuery { return r } + +// true + +type RegexpQueryVariant interface { + RegexpQueryCaster() *RegexpQuery +} + +func (s *RegexpQuery) RegexpQueryCaster() *RegexpQuery { + return s +} diff --git a/typedapi/types/regexvalidation.go b/typedapi/types/regexvalidation.go index 8cdeaf484f..f4a3b39c24 100644 --- a/typedapi/types/regexvalidation.go +++ b/typedapi/types/regexvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RegexValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L78-L81 type RegexValidation struct { Constraint string `json:"constraint"` Type string `json:"type,omitempty"` @@ -93,3 +93,13 @@ func NewRegexValidation() *RegexValidation { return r } + +// true + +type RegexValidationVariant interface { + RegexValidationCaster() *RegexValidation +} + +func (s *RegexValidation) RegexValidationCaster() *RegexValidation { + return s +} diff --git a/typedapi/types/registereddomainprocessor.go b/typedapi/types/registereddomainprocessor.go index 92606f77f4..0a899154a2 100644 --- a/typedapi/types/registereddomainprocessor.go +++ b/typedapi/types/registereddomainprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RegisteredDomainProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1282-L1298 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1323-L1339 type RegisteredDomainProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type RegisteredDomainProcessor struct { // Field Field containing the source FQDN. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If true and any required fields are missing, the processor quietly exits @@ -88,16 +88,9 @@ func (s *RegisteredDomainProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewRegisteredDomainProcessor() *RegisteredDomainProcessor { return r } + +// true + +type RegisteredDomainProcessorVariant interface { + RegisteredDomainProcessorCaster() *RegisteredDomainProcessor +} + +func (s *RegisteredDomainProcessor) RegisteredDomainProcessorCaster() *RegisteredDomainProcessor { + return s +} diff --git a/typedapi/types/regressioninferenceoptions.go b/typedapi/types/regressioninferenceoptions.go index 8333467336..3ed0d8af73 100644 --- a/typedapi/types/regressioninferenceoptions.go +++ b/typedapi/types/regressioninferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RegressionInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L82-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L82-L91 type RegressionInferenceOptions struct { // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` @@ -87,3 +87,13 @@ func NewRegressionInferenceOptions() *RegressionInferenceOptions { return r } + +// true + +type RegressionInferenceOptionsVariant interface { + RegressionInferenceOptionsCaster() *RegressionInferenceOptions +} + +func (s *RegressionInferenceOptions) RegressionInferenceOptionsCaster() *RegressionInferenceOptions { + return s +} diff --git a/typedapi/types/reindexdestination.go b/typedapi/types/reindexdestination.go index 4fe31bd4be..dfda9c44be 100644 --- a/typedapi/types/reindexdestination.go +++ b/typedapi/types/reindexdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,20 +34,26 @@ import ( // ReindexDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex/types.ts#L39-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex/types.ts#L39-L67 type ReindexDestination struct { // Index The name of the data stream, index, or index alias you are copying to. 
Index string `json:"index"` - // OpType Set to `create` to only index documents that do not already exist. - // Important: To reindex to a data stream destination, this argument must be + // OpType If it is `create`, the operation will only index documents that do not + // already exist (also known as "put if absent"). + // + // IMPORTANT: To reindex to a data stream destination, this argument must be // `create`. OpType *optype.OpType `json:"op_type,omitempty"` // Pipeline The name of the pipeline to use. Pipeline *string `json:"pipeline,omitempty"` - // Routing By default, a document's routing is preserved unless it’s changed by the + // Routing By default, a document's routing is preserved unless it's changed by the // script. - // Set to `discard` to set routing to `null`, or `=value` to route using the - // specified `value`. + // If it is `keep`, the routing on the bulk request sent for each match is set + // to the routing on the match. + // If it is `discard`, the routing on the bulk request sent for each match is + // set to `null`. + // If it is `=value`, the routing on the bulk request sent for each match is set + // to all value specified after the equals sign (`=`). Routing *string `json:"routing,omitempty"` // VersionType The versioning to use for the indexing operation. VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -111,3 +117,13 @@ func NewReindexDestination() *ReindexDestination { return r } + +// true + +type ReindexDestinationVariant interface { + ReindexDestinationCaster() *ReindexDestination +} + +func (s *ReindexDestination) ReindexDestinationCaster() *ReindexDestination { + return s +} diff --git a/typedapi/types/reindexnode.go b/typedapi/types/reindexnode.go index d31da298f2..3aa4e21610 100644 --- a/typedapi/types/reindexnode.go +++ b/typedapi/types/reindexnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ReindexNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex_rethrottle/types.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex_rethrottle/types.ts#L33-L35 type ReindexNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` @@ -107,9 +107,11 @@ func (s *ReindexNode) UnmarshalJSON(data []byte) error { // NewReindexNode returns a ReindexNode. func NewReindexNode() *ReindexNode { r := &ReindexNode{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]ReindexTask, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]ReindexTask), } return r } + +// false diff --git a/typedapi/types/reindexsource.go b/typedapi/types/reindexsource.go index 86189b6e01..1cb777631c 100644 --- a/typedapi/types/reindexsource.go +++ b/typedapi/types/reindexsource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,26 +31,37 @@ import ( // ReindexSource type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex/types.ts#L66-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex/types.ts#L69-L110 type ReindexSource struct { // Index The name of the data stream, index, or alias you are copying from. - // Accepts a comma-separated list to reindex from multiple sources. + // It accepts a comma-separated list to reindex from multiple sources. Index []string `json:"index"` - // Query Specifies the documents to reindex using the Query DSL. + // Query The documents to reindex, which is defined with Query DSL. Query *Query `json:"query,omitempty"` // Remote A remote instance of Elasticsearch that you want to index from. Remote *RemoteSource `json:"remote,omitempty"` RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` // Size The number of documents to index per batch. - // Use when indexing from remote to ensure that the batches fit within the - // on-heap buffer, which defaults to a maximum size of 100 MB. + // Use it when you are indexing from remote to ensure that the batches fit + // within the on-heap buffer, which defaults to a maximum size of 100 MB. Size *int `json:"size,omitempty"` // Slice Slice the reindex request manually using the provided slice ID and total // number of slices. - Slice *SlicedScroll `json:"slice,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` - // SourceFields_ If `true` reindexes all source fields. - // Set to a list to reindex select fields. + Slice *SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of `:` pairs to sort by before + // indexing. + // Use it in conjunction with `max_docs` to control what documents are + // reindexed. + // + // WARNING: Sort in reindex is deprecated. 
+ // Sorting in reindex was never guaranteed to index documents in order and + // prevents further development of reindex such as resilience and performance + // improvements. + // If used in combination with `max_docs`, consider using a query filter + // instead. + Sort []SortCombinations `json:"sort,omitempty"` + // SourceFields_ If `true`, reindex all source fields. + // Set it to a list to reindex select fields. SourceFields_ []string `json:"_source,omitempty"` } @@ -164,3 +175,13 @@ func NewReindexSource() *ReindexSource { return r } + +// true + +type ReindexSourceVariant interface { + ReindexSourceCaster() *ReindexSource +} + +func (s *ReindexSource) ReindexSourceCaster() *ReindexSource { + return s +} diff --git a/typedapi/types/reindexstatus.go b/typedapi/types/reindexstatus.go index 6f6c6e2032..1a54dc8742 100644 --- a/typedapi/types/reindexstatus.go +++ b/typedapi/types/reindexstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ReindexStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex_rethrottle/types.ts#L37-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex_rethrottle/types.ts#L37-L85 type ReindexStatus struct { // Batches The number of scroll responses pulled back by the reindex. 
Batches int64 `json:"batches"` @@ -237,3 +237,5 @@ func NewReindexStatus() *ReindexStatus { return r } + +// false diff --git a/typedapi/types/reindextask.go b/typedapi/types/reindextask.go index 9012265b5d..89f48849bf 100644 --- a/typedapi/types/reindextask.go +++ b/typedapi/types/reindextask.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ReindexTask type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex_rethrottle/types.ts#L87-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex_rethrottle/types.ts#L87-L98 type ReindexTask struct { Action string `json:"action"` Cancellable bool `json:"cancellable"` @@ -161,3 +161,5 @@ func NewReindexTask() *ReindexTask { return r } + +// false diff --git a/typedapi/types/reloaddetails.go b/typedapi/types/reloaddetails.go index 4fc6a43351..194ba31583 100644 --- a/typedapi/types/reloaddetails.go +++ b/typedapi/types/reloaddetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ReloadDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/reload_search_analyzers/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/reload_search_analyzers/types.ts#L27-L31 type ReloadDetails struct { Index string `json:"index"` ReloadedAnalyzers []string `json:"reloaded_analyzers"` @@ -86,3 +86,5 @@ func NewReloadDetails() *ReloadDetails { return r } + +// false diff --git a/typedapi/types/reloadresult.go b/typedapi/types/reloadresult.go index a532731d3b..6ff2e340dc 100644 --- a/typedapi/types/reloadresult.go +++ b/typedapi/types/reloadresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ReloadResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/reload_search_analyzers/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/reload_search_analyzers/types.ts#L22-L25 type ReloadResult struct { ReloadDetails []ReloadDetails `json:"reload_details"` Shards_ ShardStatistics `json:"_shards"` @@ -34,3 +34,5 @@ func NewReloadResult() *ReloadResult { return r } + +// false diff --git a/typedapi/types/relocationfailureinfo.go b/typedapi/types/relocationfailureinfo.go index 0054a58144..9dd66bdd68 100644 --- a/typedapi/types/relocationfailureinfo.go +++ b/typedapi/types/relocationfailureinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RelocationFailureInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Node.ts#L67-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Node.ts#L67-L69 type RelocationFailureInfo struct { FailedAttempts int `json:"failed_attempts"` } @@ -78,3 +78,5 @@ func NewRelocationFailureInfo() *RelocationFailureInfo { return r } + +// false diff --git a/typedapi/types/remoteclusterprivileges.go b/typedapi/types/remoteclusterprivileges.go new file mode 100644 index 0000000000..179d8903d4 --- /dev/null +++ b/typedapi/types/remoteclusterprivileges.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/remoteclusterprivilege" +) + +// RemoteClusterPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L278-L290 +type RemoteClusterPrivileges struct { + // Clusters A list of cluster aliases to which the permissions in this entry apply. + Clusters []string `json:"clusters"` + // Privileges The cluster level privileges that owners of the role have on the remote + // cluster. + Privileges []remoteclusterprivilege.RemoteClusterPrivilege `json:"privileges"` +} + +func (s *RemoteClusterPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clusters": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + s.Clusters = append(s.Clusters, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + } + } + return nil +} + +// NewRemoteClusterPrivileges returns a RemoteClusterPrivileges. 
+func NewRemoteClusterPrivileges() *RemoteClusterPrivileges { + r := &RemoteClusterPrivileges{} + + return r +} + +// true + +type RemoteClusterPrivilegesVariant interface { + RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges +} + +func (s *RemoteClusterPrivileges) RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges { + return s +} diff --git a/typedapi/types/remoteindicesprivileges.go b/typedapi/types/remoteindicesprivileges.go index 75b91422c3..13a6f9f94c 100644 --- a/typedapi/types/remoteindicesprivileges.go +++ b/typedapi/types/remoteindicesprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RemoteIndicesPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L225-L253 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L244-L276 type RemoteIndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. 
Implicitly, restricted indices have limited @@ -109,8 +109,19 @@ func (s *RemoteIndicesPrivileges) UnmarshalJSON(data []byte) error { } case "names": - if err := dec.Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } } case "privileges": @@ -136,7 +147,7 @@ func (s *RemoteIndicesPrivileges) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", 
"match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -174,3 +185,13 @@ func NewRemoteIndicesPrivileges() *RemoteIndicesPrivileges { return r } + +// true + +type RemoteIndicesPrivilegesVariant interface { + RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges +} + +func (s *RemoteIndicesPrivileges) RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges { + return s +} diff --git a/typedapi/types/remotesource.go b/typedapi/types/remotesource.go index ac234d1af3..f8083c720e 100644 --- a/typedapi/types/remotesource.go +++ b/typedapi/types/remotesource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,18 +30,18 @@ import ( // RemoteSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/reindex/types.ts#L99-L125 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/reindex/types.ts#L112-L140 type RemoteSource struct { // ConnectTimeout The remote connection timeout. - // Defaults to 30 seconds. 
ConnectTimeout Duration `json:"connect_timeout,omitempty"` // Headers An object containing the headers of the request. Headers map[string]string `json:"headers,omitempty"` // Host The URL for the remote instance of Elasticsearch that you want to index from. + // This information is required when you're indexing from remote. Host string `json:"host"` // Password The password to use for authentication with the remote host. Password *string `json:"password,omitempty"` - // SocketTimeout The remote socket read timeout. Defaults to 30 seconds. + // SocketTimeout The remote socket read timeout. SocketTimeout Duration `json:"socket_timeout,omitempty"` // Username The username to use for authentication with the remote host. Username *string `json:"username,omitempty"` @@ -103,8 +103,18 @@ func (s *RemoteSource) UnmarshalJSON(data []byte) error { // NewRemoteSource returns a RemoteSource. func NewRemoteSource() *RemoteSource { r := &RemoteSource{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// true + +type RemoteSourceVariant interface { + RemoteSourceCaster() *RemoteSource +} + +func (s *RemoteSource) RemoteSourceCaster() *RemoteSource { + return s +} diff --git a/typedapi/types/remoteuserindicesprivileges.go b/typedapi/types/remoteuserindicesprivileges.go new file mode 100644 index 0000000000..b28ff406d0 --- /dev/null +++ b/typedapi/types/remoteuserindicesprivileges.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +// RemoteUserIndicesPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L316-L339 +type RemoteUserIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices bool `json:"allow_restricted_indices"` + Clusters []string `json:"clusters"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity []FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. 
+ Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query Search queries that define the documents the user has access to. A document + // within the specified indices must match these queries for it to be accessible + // by the owners of the role. + Query []IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *RemoteUserIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = value + case bool: + s.AllowRestrictedIndices = v + } + + case "clusters": + if err := dec.Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + query_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + 
for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + } + + } + } + return nil +} + +// NewRemoteUserIndicesPrivileges returns a RemoteUserIndicesPrivileges. 
+func NewRemoteUserIndicesPrivileges() *RemoteUserIndicesPrivileges { + r := &RemoteUserIndicesPrivileges{} + + return r +} + +// false diff --git a/typedapi/types/removeaction.go b/typedapi/types/removeaction.go index 3e5a75d94d..1654f02877 100644 --- a/typedapi/types/removeaction.go +++ b/typedapi/types/removeaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RemoveAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/types.ts#L97-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/types.ts#L97-L122 type RemoveAction struct { // Alias Alias for the action. // Index alias names support date math. @@ -131,3 +131,13 @@ func NewRemoveAction() *RemoveAction { return r } + +// true + +type RemoveActionVariant interface { + RemoveActionCaster() *RemoveAction +} + +func (s *RemoveAction) RemoveActionCaster() *RemoveAction { + return s +} diff --git a/typedapi/types/removeduplicatestokenfilter.go b/typedapi/types/removeduplicatestokenfilter.go index 682f5c2bce..a4e9fb8a66 100644 --- a/typedapi/types/removeduplicatestokenfilter.go +++ b/typedapi/types/removeduplicatestokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RemoveDuplicatesTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L302-L304 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L302-L304 type RemoveDuplicatesTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewRemoveDuplicatesTokenFilter() *RemoveDuplicatesTokenFilter { return r } + +// true + +type RemoveDuplicatesTokenFilterVariant interface { + RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter +} + +func (s *RemoveDuplicatesTokenFilter) RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter { + return s +} diff --git a/typedapi/types/removeindexaction.go b/typedapi/types/removeindexaction.go index bdae1e63cf..22cb2d6279 100644 --- a/typedapi/types/removeindexaction.go +++ b/typedapi/types/removeindexaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RemoveIndexAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/update_aliases/types.ts#L124-L139 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/update_aliases/types.ts#L124-L139 type RemoveIndexAction struct { // Index Data stream or index for the action. // Supports wildcards (`*`). @@ -104,3 +104,13 @@ func NewRemoveIndexAction() *RemoveIndexAction { return r } + +// true + +type RemoveIndexActionVariant interface { + RemoveIndexActionCaster() *RemoveIndexAction +} + +func (s *RemoveIndexAction) RemoveIndexActionCaster() *RemoveIndexAction { + return s +} diff --git a/typedapi/types/removeprocessor.go b/typedapi/types/removeprocessor.go index a64db5d402..ae2ac83150 100644 --- a/typedapi/types/removeprocessor.go +++ b/typedapi/types/removeprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RemoveProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1300-L1314 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1341-L1355 type RemoveProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type RemoveProcessor struct { // Field Fields to be removed. Supports template snippets. Field []string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -99,16 +99,9 @@ func (s *RemoveProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -182,3 +175,13 @@ func NewRemoveProcessor() *RemoveProcessor { return r } + +// true + +type RemoveProcessorVariant interface { + RemoveProcessorCaster() *RemoveProcessor +} + +func (s *RemoveProcessor) RemoveProcessorCaster() *RemoveProcessor { + return s +} diff --git a/typedapi/types/renameprocessor.go b/typedapi/types/renameprocessor.go index ab0cf4f1ea..5601129509 100644 --- a/typedapi/types/renameprocessor.go +++ b/typedapi/types/renameprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RenameProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1316-L1332 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1357-L1373 type RenameProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. 
@@ -40,7 +40,7 @@ type RenameProcessor struct { // Supports template snippets. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -89,16 +89,9 @@ func (s *RenameProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -161,3 +154,13 @@ func NewRenameProcessor() *RenameProcessor { return r } + +// true + +type RenameProcessorVariant interface { + RenameProcessorCaster() *RenameProcessor +} + +func (s *RenameProcessor) RenameProcessorCaster() *RenameProcessor { + return s +} diff --git a/typedapi/types/replicationaccess.go b/typedapi/types/replicationaccess.go index 21ad998232..0f270e6897 100644 --- a/typedapi/types/replicationaccess.go +++ b/typedapi/types/replicationaccess.go @@ -16,22 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + // ReplicationAccess type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L380-L385 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L442-L452 type ReplicationAccess struct { + // AllowRestrictedIndices This needs to be set to true if the patterns in the names field should cover + // system indices. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` // Names A list of indices (or index name patterns) to which the permissions in this // entry apply. Names []string `json:"names"` } +func (s *ReplicationAccess) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + } + } + return nil +} + // NewReplicationAccess returns a ReplicationAccess. 
func NewReplicationAccess() *ReplicationAccess { r := &ReplicationAccess{} return r } + +// true + +type ReplicationAccessVariant interface { + ReplicationAccessCaster() *ReplicationAccess +} + +func (s *ReplicationAccess) ReplicationAccessCaster() *ReplicationAccess { + return s +} diff --git a/typedapi/types/reportingemailattachment.go b/typedapi/types/reportingemailattachment.go index ddf0061b4c..2a89b5efb2 100644 --- a/typedapi/types/reportingemailattachment.go +++ b/typedapi/types/reportingemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ReportingEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L224-L232 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L224-L232 type ReportingEmailAttachment struct { Inline *bool `json:"inline,omitempty"` Interval Duration `json:"interval,omitempty"` @@ -118,3 +118,13 @@ func NewReportingEmailAttachment() *ReportingEmailAttachment { return r } + +// true + +type ReportingEmailAttachmentVariant interface { + ReportingEmailAttachmentCaster() *ReportingEmailAttachment +} + +func (s *ReportingEmailAttachment) ReportingEmailAttachmentCaster() *ReportingEmailAttachment { + return s +} diff --git a/typedapi/types/repositoriesrecord.go b/typedapi/types/repositoriesrecord.go index ab56074344..f88c6df598 100644 --- a/typedapi/types/repositoriesrecord.go +++ b/typedapi/types/repositoriesrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RepositoriesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/repositories/types.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/repositories/types.ts#L20-L31 type RepositoriesRecord struct { // Id The unique repository identifier. Id *string `json:"id,omitempty"` @@ -89,3 +89,5 @@ func NewRepositoriesRecord() *RepositoriesRecord { return r } + +// false diff --git a/typedapi/types/repository.go b/typedapi/types/repository.go index e10245996f..dc283db2f0 100644 --- a/typedapi/types/repository.go +++ b/typedapi/types/repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,5 +29,9 @@ package types // ReadOnlyUrlRepository // SourceOnlyRepository // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L24-L34 type Repository any + +type RepositoryVariant interface { + RepositoryCaster() *Repository +} diff --git a/typedapi/types/repositoryintegrityindicator.go b/typedapi/types/repositoryintegrityindicator.go index 2b40ef0f18..3bd5dfa1a9 100644 --- a/typedapi/types/repositoryintegrityindicator.go +++ b/typedapi/types/repositoryintegrityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RepositoryIntegrityIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L136-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L137-L141 type RepositoryIntegrityIndicator struct { Details *RepositoryIntegrityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewRepositoryIntegrityIndicator() *RepositoryIntegrityIndicator { return r } + +// false diff --git a/typedapi/types/repositoryintegrityindicatordetails.go b/typedapi/types/repositoryintegrityindicatordetails.go index c46b5a599a..393981195a 100644 --- a/typedapi/types/repositoryintegrityindicatordetails.go +++ b/typedapi/types/repositoryintegrityindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RepositoryIntegrityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L141-L145 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L142-L146 type RepositoryIntegrityIndicatorDetails struct { Corrupted []string `json:"corrupted,omitempty"` CorruptedRepositories *int64 `json:"corrupted_repositories,omitempty"` @@ -99,3 +99,5 @@ func NewRepositoryIntegrityIndicatorDetails() *RepositoryIntegrityIndicatorDetai return r } + +// false diff --git a/typedapi/types/repositorylocation.go b/typedapi/types/repositorylocation.go index 67567a80fb..0308e851d7 100644 --- a/typedapi/types/repositorylocation.go +++ b/typedapi/types/repositorylocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RepositoryLocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 type RepositoryLocation struct { BasePath string `json:"base_path"` // Bucket Bucket name (GCP, S3) @@ -102,3 +102,5 @@ func NewRepositoryLocation() *RepositoryLocation { return r } + +// false diff --git a/typedapi/types/repositorymeteringinformation.go b/typedapi/types/repositorymeteringinformation.go index 7b5697374a..7093b452b7 100644 --- a/typedapi/types/repositorymeteringinformation.go +++ b/typedapi/types/repositorymeteringinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RepositoryMeteringInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 type RepositoryMeteringInformation struct { // Archived A flag that tells whether or not this object has been archived. 
When a // repository is closed or updated the @@ -154,3 +154,5 @@ func NewRepositoryMeteringInformation() *RepositoryMeteringInformation { return r } + +// false diff --git a/typedapi/types/requestcachestats.go b/typedapi/types/requestcachestats.go index c7dcb767e1..d350500a35 100644 --- a/typedapi/types/requestcachestats.go +++ b/typedapi/types/requestcachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RequestCacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L244-L250 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L247-L253 type RequestCacheStats struct { Evictions int64 `json:"evictions"` HitCount int64 `json:"hit_count"` @@ -138,3 +138,5 @@ func NewRequestCacheStats() *RequestCacheStats { return r } + +// false diff --git a/typedapi/types/requestcounts.go b/typedapi/types/requestcounts.go index e04fce618c..963178a383 100644 --- a/typedapi/types/requestcounts.go +++ b/typedapi/types/requestcounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RequestCounts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 type RequestCounts struct { // GetBlob Number of Get Blob requests (Azure) GetBlob *int64 `json:"GetBlob,omitempty"` @@ -253,3 +253,5 @@ func NewRequestCounts() *RequestCounts { return r } + +// false diff --git a/typedapi/types/requestitem.go b/typedapi/types/requestitem.go index 88ac09aae3..57b5fe4d13 100644 --- a/typedapi/types/requestitem.go +++ b/typedapi/types/requestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // MultisearchHeader // TemplateConfig // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch_template/types.ts#L25-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch_template/types.ts#L25-L26 type RequestItem any + +type RequestItemVariant interface { + RequestItemCaster() *RequestItem +} diff --git a/typedapi/types/reroutedecision.go b/typedapi/types/reroutedecision.go index fec8934e2e..02ee364adc 100644 --- a/typedapi/types/reroutedecision.go +++ b/typedapi/types/reroutedecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RerouteDecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L86-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L86-L90 type RerouteDecision struct { Decider string `json:"decider"` Decision string `json:"decision"` @@ -100,3 +100,5 @@ func NewRerouteDecision() *RerouteDecision { return r } + +// false diff --git a/typedapi/types/rerouteexplanation.go b/typedapi/types/rerouteexplanation.go index ec03f0a386..719964a574 100644 --- a/typedapi/types/rerouteexplanation.go +++ b/typedapi/types/rerouteexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RerouteExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L92-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L92-L96 type RerouteExplanation struct { Command string `json:"command"` Decisions []RerouteDecision `json:"decisions"` @@ -86,3 +86,5 @@ func NewRerouteExplanation() *RerouteExplanation { return r } + +// false diff --git a/typedapi/types/rerouteparameters.go b/typedapi/types/rerouteparameters.go index 44f1a21498..e480cb1fe6 100644 --- a/typedapi/types/rerouteparameters.go +++ b/typedapi/types/rerouteparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RerouteParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/reroute/types.ts#L98-L105 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/reroute/types.ts#L98-L105 type RerouteParameters struct { AllowPrimary bool `json:"allow_primary"` FromNode *string `json:"from_node,omitempty"` @@ -117,3 +117,5 @@ func NewRerouteParameters() *RerouteParameters { return r } + +// false diff --git a/typedapi/types/rerouteprocessor.go b/typedapi/types/rerouteprocessor.go index fbb3d670fc..6d1096002d 100644 --- a/typedapi/types/rerouteprocessor.go +++ b/typedapi/types/rerouteprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RerouteProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1334-L1362 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1375-L1403 type RerouteProcessor struct { // Dataset Field references or a static value for the dataset part of the data stream // name. @@ -55,7 +55,7 @@ type RerouteProcessor struct { // option is set. Destination *string `json:"destination,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // Namespace Field references or a static value for the namespace part of the data stream @@ -134,16 +134,9 @@ func (s *RerouteProcessor) UnmarshalJSON(data []byte) error { s.Destination = &o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -203,3 +196,13 @@ func NewRerouteProcessor() *RerouteProcessor { return r } + +// true + +type RerouteProcessorVariant interface { + RerouteProcessorCaster() *RerouteProcessor +} + +func (s *RerouteProcessor) RerouteProcessorCaster() *RerouteProcessor { + return s +} diff --git a/typedapi/types/rescore.go b/typedapi/types/rescore.go index 33b8b33416..da4858a991 100644 --- a/typedapi/types/rescore.go +++ b/typedapi/types/rescore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,12 @@ import ( // Rescore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/rescoring.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/rescoring.ts#L25-L38 type Rescore struct { - LearningToRank *LearningToRank `json:"learning_to_rank,omitempty"` - Query *RescoreQuery `json:"query,omitempty"` - WindowSize *int `json:"window_size,omitempty"` + AdditionalRescoreProperty map[string]json.RawMessage `json:"-"` + LearningToRank *LearningToRank `json:"learning_to_rank,omitempty"` + Query *RescoreQuery `json:"query,omitempty"` + WindowSize *int `json:"window_size,omitempty"` } func (s *Rescore) UnmarshalJSON(data []byte) error { @@ -79,14 +80,68 @@ func (s *Rescore) UnmarshalJSON(data []byte) error { s.WindowSize = &f } + default: + + if key, ok := t.(string); ok { + if s.AdditionalRescoreProperty == nil { + s.AdditionalRescoreProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalRescoreProperty", err) + } + s.AdditionalRescoreProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Rescore) MarshalJSON() ([]byte, error) { + type opt Rescore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRescoreProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRescoreProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRescore 
returns a Rescore. func NewRescore() *Rescore { - r := &Rescore{} + r := &Rescore{ + AdditionalRescoreProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RescoreVariant interface { + RescoreCaster() *Rescore +} + +func (s *Rescore) RescoreCaster() *Rescore { + return s +} diff --git a/typedapi/types/rescorequery.go b/typedapi/types/rescorequery.go index e045fd5b42..8ef5d01ca7 100644 --- a/typedapi/types/rescorequery.go +++ b/typedapi/types/rescorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RescoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/rescoring.ts#L40-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/rescoring.ts#L40-L62 type RescoreQuery struct { // Query The query to use for rescoring. // This query is only run on the Top-K results returned by the `query` and @@ -115,3 +115,13 @@ func NewRescoreQuery() *RescoreQuery { return r } + +// true + +type RescoreQueryVariant interface { + RescoreQueryCaster() *RescoreQuery +} + +func (s *RescoreQuery) RescoreQueryCaster() *RescoreQuery { + return s +} diff --git a/typedapi/types/rescorevector.go b/typedapi/types/rescorevector.go new file mode 100644 index 0000000000..7af1b40d96 --- /dev/null +++ b/typedapi/types/rescorevector.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RescoreVector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Knn.ts#L30-L33 +type RescoreVector struct { + // Oversample Applies the specified oversample factor to k on the approximate kNN search + Oversample float32 `json:"oversample"` +} + +func (s *RescoreVector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "oversample": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Oversample", err) + } + f := float32(value) + s.Oversample = f + case float64: + f := float32(v) + s.Oversample = f + } + + } + } + return nil +} + +// NewRescoreVector returns a RescoreVector. 
+func NewRescoreVector() *RescoreVector { + r := &RescoreVector{} + + return r +} + +// true + +type RescoreVectorVariant interface { + RescoreVectorCaster() *RescoreVector +} + +func (s *RescoreVector) RescoreVectorCaster() *RescoreVector { + return s +} diff --git a/typedapi/types/reservedsize.go b/typedapi/types/reservedsize.go index e026987fa1..4f3ef025d2 100644 --- a/typedapi/types/reservedsize.go +++ b/typedapi/types/reservedsize.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ReservedSize type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L72-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L72-L77 type ReservedSize struct { NodeId string `json:"node_id"` Path string `json:"path"` @@ -102,3 +102,5 @@ func NewReservedSize() *ReservedSize { return r } + +// false diff --git a/typedapi/types/resolveclusterinfo.go b/typedapi/types/resolveclusterinfo.go index 0e4d3672cb..0c7b5ea1a1 100644 --- a/typedapi/types/resolveclusterinfo.go +++ b/typedapi/types/resolveclusterinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,20 +31,20 @@ import ( // ResolveClusterInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55 type ResolveClusterInfo struct { // Connected Whether the remote cluster is connected to the local (querying) cluster. Connected bool `json:"connected"` // Error Provides error messages that are likely to occur if you do a search with this // index expression - // on the specified cluster (e.g., lack of security privileges to query an - // index). + // on the specified cluster (for example, lack of security privileges to query + // an index). Error *string `json:"error,omitempty"` // MatchingIndices Whether the index expression provided in the request matches any indices, // aliases or data streams // on the cluster. MatchingIndices *bool `json:"matching_indices,omitempty"` - // SkipUnavailable The skip_unavailable setting for a remote cluster. + // SkipUnavailable The `skip_unavailable` setting for a remote cluster. SkipUnavailable bool `json:"skip_unavailable"` // Version Provides version information about the cluster. Version *ElasticsearchVersionMinInfo `json:"version,omitempty"` @@ -135,3 +135,5 @@ func NewResolveClusterInfo() *ResolveClusterInfo { return r } + +// false diff --git a/typedapi/types/resolveindexaliasitem.go b/typedapi/types/resolveindexaliasitem.go index d810add3d2..10ebb5f37a 100644 --- a/typedapi/types/resolveindexaliasitem.go +++ b/typedapi/types/resolveindexaliasitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ResolveIndexAliasItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 type ResolveIndexAliasItem struct { Indices []string `json:"indices"` Name string `json:"name"` @@ -83,3 +83,5 @@ func NewResolveIndexAliasItem() *ResolveIndexAliasItem { return r } + +// false diff --git a/typedapi/types/resolveindexdatastreamsitem.go b/typedapi/types/resolveindexdatastreamsitem.go index 457ebfeff7..e6399e77ca 100644 --- a/typedapi/types/resolveindexdatastreamsitem.go +++ b/typedapi/types/resolveindexdatastreamsitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ResolveIndexDataStreamsItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 type ResolveIndexDataStreamsItem struct { BackingIndices []string `json:"backing_indices"` Name string `json:"name"` @@ -89,3 +89,5 @@ func NewResolveIndexDataStreamsItem() *ResolveIndexDataStreamsItem { return r } + +// false diff --git a/typedapi/types/resolveindexitem.go b/typedapi/types/resolveindexitem.go index 11c5d5cea2..8e73f48b6a 100644 --- a/typedapi/types/resolveindexitem.go +++ b/typedapi/types/resolveindexitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ResolveIndexItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 type ResolveIndexItem struct { Aliases []string `json:"aliases,omitempty"` Attributes []string `json:"attributes"` @@ -84,3 +84,5 @@ func NewResolveIndexItem() *ResolveIndexItem { return r } + +// false diff --git a/typedapi/types/resourceprivileges.go b/typedapi/types/resourceprivileges.go index 907e113f7b..ebc535e196 100644 --- a/typedapi/types/resourceprivileges.go +++ b/typedapi/types/resourceprivileges.go @@ -16,11 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ResourcePrivileges type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/has_privileges/types.ts#L47-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/has_privileges/types.ts#L48-L48 type ResourcePrivileges map[string]Privileges diff --git a/typedapi/types/responsebody.go b/typedapi/types/responsebody.go deleted file mode 100644 index 5f51f07550..0000000000 --- a/typedapi/types/responsebody.go +++ /dev/null @@ -1,789 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "strings" -) - -// ResponseBody type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/SearchResponse.ts#L38-L54 -type ResponseBody struct { - Aggregations map[string]Aggregate `json:"aggregations,omitempty"` - Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits HitsMetadata `json:"hits"` - MaxScore *Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` - Shards_ ShardStatistics `json:"_shards"` - Suggest map[string][]Suggest `json:"suggest,omitempty"` - TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` -} - -func (s *ResponseBody) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "aggregations": - if s.Aggregations == nil { - s.Aggregations = make(map[string]Aggregate, 0) - } - - for dec.More() { - tt, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - if s.Aggregations == nil { - s.Aggregations = make(map[string]Aggregate, 0) - } - switch elems[0] { - - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - 
s.Aggregations[elems[1]] = o - - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "min": - o := NewMinAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "max": - o := NewMaxAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sum": - o := NewSumAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := 
dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case 
"histogram": - o := NewHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", 
"Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "parent": - o := NewParentAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", 
"Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "range": - o := NewRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(&o); err != nil { - 
return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "frequent_item_sets": - o := NewFrequentItemSetsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "time_series": - o := NewTimeSeriesAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "boxplot": - o := NewBoxPlotAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "t_test": - o := NewTTestAggregate() - if err := 
dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "rate": - o := NewRateAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - default: - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[value] = o - } - } - } - - case "_clusters": - if err := dec.Decode(&s.Clusters_); err != nil { - return fmt.Errorf("%s | %w", "Clusters_", err) - } - - case "fields": - if s.Fields == nil { - s.Fields = make(map[string]json.RawMessage, 0) - } - if err := dec.Decode(&s.Fields); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) - } - - case "hits": - if err := dec.Decode(&s.Hits); err != nil { - return fmt.Errorf("%s | %w", "Hits", err) - } - - case "max_score": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "MaxScore", err) - } - f := Float64(value) - s.MaxScore = &f - case float64: - f := Float64(v) 
- s.MaxScore = &f - } - - case "num_reduce_phases": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "NumReducePhases", err) - } - s.NumReducePhases = &value - case float64: - f := int64(v) - s.NumReducePhases = &f - } - - case "pit_id": - if err := dec.Decode(&s.PitId); err != nil { - return fmt.Errorf("%s | %w", "PitId", err) - } - - case "profile": - if err := dec.Decode(&s.Profile); err != nil { - return fmt.Errorf("%s | %w", "Profile", err) - } - - case "_scroll_id": - if err := dec.Decode(&s.ScrollId_); err != nil { - return fmt.Errorf("%s | %w", "ScrollId_", err) - } - - case "_shards": - if err := dec.Decode(&s.Shards_); err != nil { - return fmt.Errorf("%s | %w", "Shards_", err) - } - - case "suggest": - if s.Suggest == nil { - s.Suggest = make(map[string][]Suggest, 0) - } - - for dec.More() { - tt, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - if s.Suggest == nil { - s.Suggest = make(map[string][]Suggest, 0) - } - switch elems[0] { - - case "completion": - o := NewCompletionSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - case "phrase": - o := NewPhraseSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - case "term": - o := NewTermSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - default: - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = 
append(s.Suggest[elems[1]], o) - } - } else { - return errors.New("cannot decode JSON for field Suggest") - } - } else { - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[value] = append(s.Suggest[value], o) - } - } - } - - case "terminated_early": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TerminatedEarly", err) - } - s.TerminatedEarly = &value - case bool: - s.TerminatedEarly = &v - } - - case "timed_out": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TimedOut", err) - } - s.TimedOut = value - case bool: - s.TimedOut = v - } - - case "took": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Took", err) - } - s.Took = value - case float64: - f := int64(v) - s.Took = f - } - - } - } - return nil -} - -// NewResponseBody returns a ResponseBody. -func NewResponseBody() *ResponseBody { - r := &ResponseBody{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), - } - - return r -} diff --git a/typedapi/types/responseitem.go b/typedapi/types/responseitem.go index 9f11b4b6ee..051aadb0ed 100644 --- a/typedapi/types/responseitem.go +++ b/typedapi/types/responseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,38 +27,43 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/failurestorestatus" ) // ResponseItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L37-L81 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L37-L84 type ResponseItem struct { - // Error Contains additional information about the failed operation. - // The parameter is only returned for failed operations. - Error *ErrorCause `json:"error,omitempty"` - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Get *InlineGetDictUserDefined `json:"get,omitempty"` + // Error Additional information about the failed operation. + // The property is returned only for failed operations. + Error *ErrorCause `json:"error,omitempty"` + FailureStore *failurestorestatus.FailureStoreStatus `json:"failure_store,omitempty"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *InlineGetDictUserDefined `json:"get,omitempty"` // Id_ The document ID associated with the operation. Id_ *string `json:"_id,omitempty"` - // Index_ Name of the index associated with the operation. + // Index_ The name of the index associated with the operation. // If the operation targeted a data stream, this is the backing index into which // the document was written. Index_ string `json:"_index"` // PrimaryTerm_ The primary term assigned to the document for the operation. + // This property is returned only for successful operations. PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - // Result Result of the operation. + // Result The result of the operation. 
// Successful values are `created`, `deleted`, and `updated`. Result *string `json:"result,omitempty"` // SeqNo_ The sequence number assigned to the document for the operation. - // Sequence numbers are used to ensure an older version of a document doesn’t + // Sequence numbers are used to ensure an older version of a document doesn't // overwrite a newer version. SeqNo_ *int64 `json:"_seq_no,omitempty"` - // Shards_ Contains shard information for the operation. + // Shards_ Shard information for the operation. Shards_ *ShardStatistics `json:"_shards,omitempty"` - // Status HTTP status code returned for the operation. + // Status The HTTP status code returned for the operation. Status int `json:"status"` // Version_ The document version associated with the operation. // The document version is incremented each time the document is updated. + // This property is returned only for successful actions. Version_ *int64 `json:"_version,omitempty"` } @@ -82,6 +87,11 @@ func (s *ResponseItem) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Error", err) } + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + case "forced_refresh": var tmp any dec.Decode(&tmp) @@ -194,3 +204,5 @@ func NewResponseItem() *ResponseItem { return r } + +// false diff --git a/typedapi/types/restriction.go b/typedapi/types/restriction.go new file mode 100644 index 0000000000..6caf15087b --- /dev/null +++ b/typedapi/types/restriction.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/restrictionworkflow" +) + +// Restriction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleDescriptor.ts#L135-L141 +type Restriction struct { + // Workflows A list of workflows to which the API key is restricted. + // NOTE: In order to use a role restriction, an API key must be created with a + // single role descriptor. + Workflows []restrictionworkflow.RestrictionWorkflow `json:"workflows"` +} + +// NewRestriction returns a Restriction. +func NewRestriction() *Restriction { + r := &Restriction{} + + return r +} + +// true + +type RestrictionVariant interface { + RestrictionCaster() *Restriction +} + +func (s *Restriction) RestrictionCaster() *Restriction { + return s +} diff --git a/typedapi/types/retention.go b/typedapi/types/retention.go index d71b3365e9..5cecb5d1b9 100644 --- a/typedapi/types/retention.go +++ b/typedapi/types/retention.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Retention type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L84-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L94-L107 type Retention struct { // ExpireAfter Time period after which a snapshot is considered expired and eligible for // deletion. SLM deletes expired snapshots based on the slm.retention_schedule. @@ -107,3 +107,13 @@ func NewRetention() *Retention { return r } + +// true + +type RetentionVariant interface { + RetentionCaster() *Retention +} + +func (s *Retention) RetentionCaster() *Retention { + return s +} diff --git a/typedapi/types/retentionlease.go b/typedapi/types/retentionlease.go index d8570144d9..597a8e8cb4 100644 --- a/typedapi/types/retentionlease.go +++ b/typedapi/types/retentionlease.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RetentionLease type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L65-L67 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L66-L68 type RetentionLease struct { Period Duration `json:"period"` } @@ -66,3 +66,13 @@ func NewRetentionLease() *RetentionLease { return r } + +// true + +type RetentionLeaseVariant interface { + RetentionLeaseCaster() *RetentionLease +} + +func (s *RetentionLease) RetentionLeaseCaster() *RetentionLease { + return s +} diff --git a/typedapi/types/retentionpolicy.go b/typedapi/types/retentionpolicy.go index 466ea0d39a..baa9b4b6d4 100644 --- a/typedapi/types/retentionpolicy.go +++ b/typedapi/types/retentionpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RetentionPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L88-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L88-L96 type RetentionPolicy struct { // Field The date field that is used to calculate the age of the document. 
Field string `json:"field"` @@ -76,3 +76,13 @@ func NewRetentionPolicy() *RetentionPolicy { return r } + +// true + +type RetentionPolicyVariant interface { + RetentionPolicyCaster() *RetentionPolicy +} + +func (s *RetentionPolicy) RetentionPolicyCaster() *RetentionPolicy { + return s +} diff --git a/typedapi/types/retentionpolicycontainer.go b/typedapi/types/retentionpolicycontainer.go index 2be130c59c..8d1438d2f9 100644 --- a/typedapi/types/retentionpolicycontainer.go +++ b/typedapi/types/retentionpolicycontainer.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // RetentionPolicyContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L80-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L80-L86 type RetentionPolicyContainer struct { + AdditionalRetentionPolicyContainerProperty map[string]json.RawMessage `json:"-"` // Time Specifies that the transform uses a time field to set the retention policy. 
Time *RetentionPolicy `json:"time,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RetentionPolicyContainer) MarshalJSON() ([]byte, error) { + type opt RetentionPolicyContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRetentionPolicyContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRetentionPolicyContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRetentionPolicyContainer returns a RetentionPolicyContainer. func NewRetentionPolicyContainer() *RetentionPolicyContainer { - r := &RetentionPolicyContainer{} + r := &RetentionPolicyContainer{ + AdditionalRetentionPolicyContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RetentionPolicyContainerVariant interface { + RetentionPolicyContainerCaster() *RetentionPolicyContainer +} + +func (s *RetentionPolicyContainer) RetentionPolicyContainerCaster() *RetentionPolicyContainer { + return s +} diff --git a/typedapi/types/retries.go b/typedapi/types/retries.go index e0ddd937d1..fff52f2b7d 100644 --- a/typedapi/types/retries.go +++ b/typedapi/types/retries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,9 +31,11 @@ import ( // Retries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retries.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retries.ts#L22-L31 type Retries struct { - Bulk int64 `json:"bulk"` + // Bulk The number of bulk actions retried. + Bulk int64 `json:"bulk"` + // Search The number of search actions retried. Search int64 `json:"search"` } @@ -93,3 +95,5 @@ func NewRetries() *Retries { return r } + +// false diff --git a/typedapi/types/retrievercontainer.go b/typedapi/types/retrievercontainer.go index bde4a6df94..16b9015e88 100644 --- a/typedapi/types/retrievercontainer.go +++ b/typedapi/types/retrievercontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // RetrieverContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L28-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L28-L42 type RetrieverContainer struct { + AdditionalRetrieverContainerProperty map[string]json.RawMessage `json:"-"` // Knn A retriever that replaces the functionality of a knn search. Knn *KnnRetriever `json:"knn,omitempty"` // Rrf A retriever that produces top documents from reciprocal rank fusion (RRF). 
@@ -37,9 +43,50 @@ type RetrieverContainer struct { TextSimilarityReranker *TextSimilarityReranker `json:"text_similarity_reranker,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RetrieverContainer) MarshalJSON() ([]byte, error) { + type opt RetrieverContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRetrieverContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRetrieverContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRetrieverContainer returns a RetrieverContainer. func NewRetrieverContainer() *RetrieverContainer { - r := &RetrieverContainer{} + r := &RetrieverContainer{ + AdditionalRetrieverContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RetrieverContainerVariant interface { + RetrieverContainerCaster() *RetrieverContainer +} + +func (s *RetrieverContainer) RetrieverContainerCaster() *RetrieverContainer { + return s +} diff --git a/typedapi/types/reversenestedaggregate.go b/typedapi/types/reversenestedaggregate.go index b933cbe799..af5658cfbc 100644 --- a/typedapi/types/reversenestedaggregate.go +++ b/typedapi/types/reversenestedaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ReverseNestedAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L540-L544 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L540-L544 type ReverseNestedAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s ReverseNestedAggregate) MarshalJSON() ([]byte, error) { // NewReverseNestedAggregate returns a ReverseNestedAggregate. func NewReverseNestedAggregate() *ReverseNestedAggregate { r := &ReverseNestedAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/reversenestedaggregation.go b/typedapi/types/reversenestedaggregation.go index 446f7e5494..3bfe4e0b1c 100644 --- a/typedapi/types/reversenestedaggregation.go +++ b/typedapi/types/reversenestedaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ReverseNestedAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L741-L747 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L741-L747 type ReverseNestedAggregation struct { // Path Defines the nested object field that should be joined back to. 
// The default is empty, which means that it joins back to the root/main @@ -69,3 +69,13 @@ func NewReverseNestedAggregation() *ReverseNestedAggregation { return r } + +// true + +type ReverseNestedAggregationVariant interface { + ReverseNestedAggregationCaster() *ReverseNestedAggregation +} + +func (s *ReverseNestedAggregation) ReverseNestedAggregationCaster() *ReverseNestedAggregation { + return s +} diff --git a/typedapi/types/reversetokenfilter.go b/typedapi/types/reversetokenfilter.go index e9d87476d3..b99919749b 100644 --- a/typedapi/types/reversetokenfilter.go +++ b/typedapi/types/reversetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ReverseTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L306-L308 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L306-L308 type ReverseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewReverseTokenFilter() *ReverseTokenFilter { return r } + +// true + +type ReverseTokenFilterVariant interface { + ReverseTokenFilterCaster() *ReverseTokenFilter +} + +func (s *ReverseTokenFilter) ReverseTokenFilterCaster() *ReverseTokenFilter { + return s +} diff --git a/typedapi/types/role.go b/typedapi/types/role.go index 4fec8e0179..8be51fba2c 100644 --- a/typedapi/types/role.go +++ b/typedapi/types/role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,19 +26,25 @@ import ( "errors" "fmt" "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" ) // Role type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_role/types.ts#L29-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_role/types.ts#L32-L54 type Role struct { Applications []ApplicationPrivileges `json:"applications"` - Cluster []string `json:"cluster"` + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` + Description *string `json:"description,omitempty"` Global map[string]map[string]map[string][]string `json:"global,omitempty"` Indices []IndicesPrivileges `json:"indices"` Metadata Metadata `json:"metadata"` + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` - RunAs []string `json:"run_as"` + RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` } @@ -67,6 +73,18 @@ func (s *Role) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Cluster", err) } + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + case "global": if s.Global == nil { s.Global = make(map[string]map[string]map[string][]string, 0) @@ -85,6 +103,16 @@ func (s *Role) UnmarshalJSON(data []byte) 
error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + case "role_templates": if err := dec.Decode(&s.RoleTemplates); err != nil { return fmt.Errorf("%s | %w", "RoleTemplates", err) @@ -111,9 +139,11 @@ func (s *Role) UnmarshalJSON(data []byte) error { // NewRole returns a Role. func NewRole() *Role { r := &Role{ - Global: make(map[string]map[string]map[string][]string, 0), - TransientMetadata: make(map[string]json.RawMessage, 0), + Global: make(map[string]map[string]map[string][]string), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/roledescriptor.go b/typedapi/types/roledescriptor.go index e52b935fb4..14ffc1a280 100644 --- a/typedapi/types/roledescriptor.go +++ b/typedapi/types/roledescriptor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RoleDescriptor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleDescriptor.ts#L31-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleDescriptor.ts#L33-L83 type RoleDescriptor struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` @@ -51,9 +51,17 @@ type RoleDescriptor struct { // Metadata Optional meta-data. 
Within the metadata object, keys that begin with `_` are // reserved for system usage. Metadata Metadata `json:"metadata,omitempty"` - // RunAs A list of users that the API keys can impersonate. *Note*: in Serverless, the - // run-as feature is disabled. For API compatibility, you can still specify an - // empty `run_as` field, but a non-empty list will be rejected. + // RemoteCluster A list of cluster permissions for remote clusters. + // NOTE: This is limited a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction Restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` + // RunAs A list of users that the API keys can impersonate. + // NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + // For API compatibility, you can still specify an empty `run_as` field, but a + // non-empty list will be rejected. 
RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` } @@ -121,6 +129,21 @@ func (s *RoleDescriptor) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -142,8 +165,18 @@ func (s *RoleDescriptor) UnmarshalJSON(data []byte) error { // NewRoleDescriptor returns a RoleDescriptor. func NewRoleDescriptor() *RoleDescriptor { r := &RoleDescriptor{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// true + +type RoleDescriptorVariant interface { + RoleDescriptorCaster() *RoleDescriptor +} + +func (s *RoleDescriptor) RoleDescriptorCaster() *RoleDescriptor { + return s +} diff --git a/typedapi/types/roledescriptorread.go b/typedapi/types/roledescriptorread.go index 4ebb266f48..6935508a45 100644 --- a/typedapi/types/roledescriptorread.go +++ b/typedapi/types/roledescriptorread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,14 +33,14 @@ import ( // RoleDescriptorRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleDescriptor.ts#L66-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleDescriptor.ts#L85-L133 type RoleDescriptorRead struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` // Cluster A list of cluster privileges. These privileges define the cluster level // actions that API keys are able to execute. Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` - // Description Optional description of the role descriptor + // Description An optional description of the role descriptor. Description *string `json:"description,omitempty"` // Global An object defining global privileges. A global privilege is a form of cluster // privilege that is request-aware. Support for global privileges is currently @@ -51,6 +51,13 @@ type RoleDescriptorRead struct { // Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are // reserved for system usage. Metadata Metadata `json:"metadata,omitempty"` + // RemoteCluster A list of cluster permissions for remote clusters. + // NOTE: This is limited a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction A restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` // RunAs A list of users that the API keys can impersonate. 
RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` @@ -119,6 +126,21 @@ func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -140,8 +162,10 @@ func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { // NewRoleDescriptorRead returns a RoleDescriptorRead. func NewRoleDescriptorRead() *RoleDescriptorRead { r := &RoleDescriptorRead{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/roledescriptorwrapper.go b/typedapi/types/roledescriptorwrapper.go index cb59aeb1ba..a823463f0c 100644 --- a/typedapi/types/roledescriptorwrapper.go +++ b/typedapi/types/roledescriptorwrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RoleDescriptorWrapper type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_service_accounts/types.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_service_accounts/types.ts#L22-L24 type RoleDescriptorWrapper struct { RoleDescriptor RoleDescriptorRead `json:"role_descriptor"` } @@ -33,3 +33,5 @@ func NewRoleDescriptorWrapper() *RoleDescriptorWrapper { return r } + +// false diff --git a/typedapi/types/rolemappingrule.go b/typedapi/types/rolemappingrule.go index 39b77e10dd..1be2b681ee 100644 --- a/typedapi/types/rolemappingrule.go +++ b/typedapi/types/rolemappingrule.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // RoleMappingRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleMappingRule.ts#L22-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleMappingRule.ts#L23-L31 type RoleMappingRule struct { - All []RoleMappingRule `json:"all,omitempty"` - Any []RoleMappingRule `json:"any,omitempty"` - Except *RoleMappingRule `json:"except,omitempty"` - Field *FieldRule `json:"field,omitempty"` + AdditionalRoleMappingRuleProperty map[string]json.RawMessage `json:"-"` + All []RoleMappingRule `json:"all,omitempty"` + Any []RoleMappingRule `json:"any,omitempty"` + Except *RoleMappingRule `json:"except,omitempty"` + Field map[string][]FieldValue `json:"field,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s RoleMappingRule) MarshalJSON() ([]byte, error) { + type opt RoleMappingRule + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRoleMappingRuleProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRoleMappingRuleProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewRoleMappingRule returns a RoleMappingRule. 
func NewRoleMappingRule() *RoleMappingRule { - r := &RoleMappingRule{} + r := &RoleMappingRule{ + AdditionalRoleMappingRuleProperty: make(map[string]json.RawMessage), + Field: make(map[string][]FieldValue), + } return r } + +// true + +type RoleMappingRuleVariant interface { + RoleMappingRuleCaster() *RoleMappingRule +} + +func (s *RoleMappingRule) RoleMappingRuleCaster() *RoleMappingRule { + return s +} diff --git a/typedapi/types/rolequerycontainer.go b/typedapi/types/rolequerycontainer.go index 5c8a00ced4..e37417b893 100644 --- a/typedapi/types/rolequerycontainer.go +++ b/typedapi/types/rolequerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // RoleQueryContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_role/types.ts#L37-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_role/types.ts#L37-L101 type RoleQueryContainer struct { + AdditionalRoleQueryContainerProperty map[string]json.RawMessage `json:"-"` // Bool matches roles matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns roles that contain an indexed value for a field. 
@@ -159,20 +160,73 @@ func (s *RoleQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalRoleQueryContainerProperty == nil { + s.AdditionalRoleQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalRoleQueryContainerProperty", err) + } + s.AdditionalRoleQueryContainerProperty[key] = *raw + } + } } return nil } +// MarshalJSON overrides marshalling for types with additional properties +func (s RoleQueryContainer) MarshalJSON() ([]byte, error) { + type opt RoleQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRoleQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRoleQueryContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRoleQueryContainer returns a RoleQueryContainer. 
func NewRoleQueryContainer() *RoleQueryContainer { r := &RoleQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalRoleQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type RoleQueryContainerVariant interface { + RoleQueryContainerCaster() *RoleQueryContainer +} + +func (s *RoleQueryContainer) RoleQueryContainerCaster() *RoleQueryContainer { + return s +} diff --git a/typedapi/types/roletemplate.go b/typedapi/types/roletemplate.go index d1f0213193..8ab57f6761 100644 --- a/typedapi/types/roletemplate.go +++ b/typedapi/types/roletemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // RoleTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleTemplate.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleTemplate.ts#L28-L31 type RoleTemplate struct { Format *templateformat.TemplateFormat `json:"format,omitempty"` Template Script `json:"template"` @@ -38,3 +38,13 @@ func NewRoleTemplate() *RoleTemplate { return r } + +// true + +type RoleTemplateVariant interface { + RoleTemplateCaster() *RoleTemplate +} + +func (s *RoleTemplate) RoleTemplateCaster() *RoleTemplate { + return s +} diff --git a/typedapi/types/roletemplateinlinequery.go b/typedapi/types/roletemplateinlinequery.go index 1b47dd13d5..bf3d5da62b 100644 --- a/typedapi/types/roletemplateinlinequery.go +++ b/typedapi/types/roletemplateinlinequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // Query // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L321-L322 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L383-L384 type RoleTemplateInlineQuery any + +type RoleTemplateInlineQueryVariant interface { + RoleTemplateInlineQueryCaster() *RoleTemplateInlineQuery +} diff --git a/typedapi/types/roletemplatequery.go b/typedapi/types/roletemplatequery.go index ba783c4f9a..5ae15454e4 100644 --- a/typedapi/types/roletemplatequery.go +++ b/typedapi/types/roletemplatequery.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RoleTemplateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L289-L299 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L351-L361 type RoleTemplateQuery struct { // Template When you create a role, you can specify a query that defines the document // level security permissions. 
You can optionally @@ -41,3 +41,13 @@ func NewRoleTemplateQuery() *RoleTemplateQuery { return r } + +// true + +type RoleTemplateQueryVariant interface { + RoleTemplateQueryCaster() *RoleTemplateQuery +} + +func (s *RoleTemplateQuery) RoleTemplateQueryCaster() *RoleTemplateQuery { + return s +} diff --git a/typedapi/types/roletemplatescript.go b/typedapi/types/roletemplatescript.go index becf25ba2f..157dbbf1f3 100644 --- a/typedapi/types/roletemplatescript.go +++ b/typedapi/types/roletemplatescript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // RoleTemplateScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L301-L319 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L363-L381 type RoleTemplateScript struct { // Id The `id` for a stored script. 
Id *string `json:"id,omitempty"` @@ -109,7 +109,7 @@ func (s *RoleTemplateScript) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -135,9 +135,19 @@ func (s *RoleTemplateScript) UnmarshalJSON(data 
[]byte) error { // NewRoleTemplateScript returns a RoleTemplateScript. func NewRoleTemplateScript() *RoleTemplateScript { r := &RoleTemplateScript{ - Options: make(map[string]string, 0), - Params: make(map[string]json.RawMessage, 0), + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type RoleTemplateScriptVariant interface { + RoleTemplateScriptCaster() *RoleTemplateScript +} + +func (s *RoleTemplateScript) RoleTemplateScriptCaster() *RoleTemplateScript { + return s +} diff --git a/typedapi/types/rolloveraction.go b/typedapi/types/rolloveraction.go index 1acf4fb43d..cbd2abe794 100644 --- a/typedapi/types/rolloveraction.go +++ b/typedapi/types/rolloveraction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RolloverAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L102-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L99-L110 type RolloverAction struct { MaxAge Duration `json:"max_age,omitempty"` MaxDocs *int64 `json:"max_docs,omitempty"` @@ -161,3 +161,13 @@ func NewRolloverAction() *RolloverAction { return r } + +// true + +type RolloverActionVariant interface { + RolloverActionCaster() *RolloverAction +} + +func (s *RolloverAction) RolloverActionCaster() *RolloverAction { + return s +} diff --git a/typedapi/types/rolloverconditions.go b/typedapi/types/rolloverconditions.go index 31a278df5a..75fa2638ed 100644 --- a/typedapi/types/rolloverconditions.go +++ b/typedapi/types/rolloverconditions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RolloverConditions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/rollover/types.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/rollover/types.ts#L24-L40 type RolloverConditions struct { MaxAge Duration `json:"max_age,omitempty"` MaxAgeMillis *int64 `json:"max_age_millis,omitempty"` @@ -231,3 +231,13 @@ func NewRolloverConditions() *RolloverConditions { return r } + +// true + +type RolloverConditionsVariant interface { + RolloverConditionsCaster() *RolloverConditions +} + +func (s *RolloverConditions) RolloverConditionsCaster() *RolloverConditions { + return s +} diff --git a/typedapi/types/rollupcapabilities.go b/typedapi/types/rollupcapabilities.go index 5c5a00da72..d9917a3c3f 100644 --- a/typedapi/types/rollupcapabilities.go +++ b/typedapi/types/rollupcapabilities.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RollupCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_caps/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_caps/types.ts#L24-L29 type RollupCapabilities struct { + // RollupJobs There can be multiple, independent jobs configured for a single index or + // index pattern. Each of these jobs may have different configurations, so the + // API returns a list of all the various configurations available. 
RollupJobs []RollupCapabilitySummary `json:"rollup_jobs"` } @@ -33,3 +36,5 @@ func NewRollupCapabilities() *RollupCapabilities { return r } + +// false diff --git a/typedapi/types/rollupcapabilitysummary.go b/typedapi/types/rollupcapabilitysummary.go index 32f402b107..c271fb5bbf 100644 --- a/typedapi/types/rollupcapabilitysummary.go +++ b/typedapi/types/rollupcapabilitysummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupCapabilitySummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_caps/types.ts#L28-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_caps/types.ts#L31-L36 type RollupCapabilitySummary struct { Fields map[string][]RollupFieldSummary `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -106,8 +106,10 @@ func (s *RollupCapabilitySummary) UnmarshalJSON(data []byte) error { // NewRollupCapabilitySummary returns a RollupCapabilitySummary. func NewRollupCapabilitySummary() *RollupCapabilitySummary { r := &RollupCapabilitySummary{ - Fields: make(map[string][]RollupFieldSummary, 0), + Fields: make(map[string][]RollupFieldSummary), } return r } + +// false diff --git a/typedapi/types/rollupfieldsummary.go b/typedapi/types/rollupfieldsummary.go index ef3876a9c3..232b1139ef 100644 --- a/typedapi/types/rollupfieldsummary.go +++ b/typedapi/types/rollupfieldsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupFieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_caps/types.ts#L35-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_caps/types.ts#L38-L42 type RollupFieldSummary struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -86,3 +86,5 @@ func NewRollupFieldSummary() *RollupFieldSummary { return r } + +// false diff --git a/typedapi/types/rollupjob.go b/typedapi/types/rollupjob.go index dba069740e..76332b814a 100644 --- a/typedapi/types/rollupjob.go +++ b/typedapi/types/rollupjob.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RollupJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/types.ts#L28-L43 type RollupJob struct { + // Config The rollup job configuration. Config RollupJobConfiguration `json:"config"` - Stats RollupJobStats `json:"stats"` - Status RollupJobStatus `json:"status"` + // Stats Transient statistics about the rollup job, such as how many documents have + // been processed and how many rollup summary docs have been indexed. 
+ // These stats are not persisted. + // If a node is restarted, these stats are reset. + Stats RollupJobStats `json:"stats"` + // Status The current status of the indexer for the rollup job. + Status RollupJobStatus `json:"status"` } // NewRollupJob returns a RollupJob. @@ -35,3 +41,5 @@ func NewRollupJob() *RollupJob { return r } + +// false diff --git a/typedapi/types/rollupjobconfiguration.go b/typedapi/types/rollupjobconfiguration.go index afc4915b6f..31ceaa01ca 100644 --- a/typedapi/types/rollupjobconfiguration.go +++ b/typedapi/types/rollupjobconfiguration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupJobConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/types.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/types.ts#L45-L54 type RollupJobConfiguration struct { Cron string `json:"cron"` Groups Groupings `json:"groups"` @@ -133,3 +133,5 @@ func NewRollupJobConfiguration() *RollupJobConfiguration { return r } + +// false diff --git a/typedapi/types/rollupjobstats.go b/typedapi/types/rollupjobstats.go index 126d82763d..361e8b01bd 100644 --- a/typedapi/types/rollupjobstats.go +++ b/typedapi/types/rollupjobstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupJobStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/types.ts#L45-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/types.ts#L56-L69 type RollupJobStats struct { DocumentsProcessed int64 `json:"documents_processed"` IndexFailures int64 `json:"index_failures"` @@ -223,3 +223,5 @@ func NewRollupJobStats() *RollupJobStats { return r } + +// false diff --git a/typedapi/types/rollupjobstatus.go b/typedapi/types/rollupjobstatus.go index 3f9f32446a..3811d7b033 100644 --- a/typedapi/types/rollupjobstatus.go +++ b/typedapi/types/rollupjobstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RollupJobStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_jobs/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_jobs/types.ts#L71-L75 type RollupJobStatus struct { CurrentPosition map[string]json.RawMessage `json:"current_position,omitempty"` JobState indexingjobstate.IndexingJobState `json:"job_state"` @@ -90,8 +90,10 @@ func (s *RollupJobStatus) UnmarshalJSON(data []byte) error { // NewRollupJobStatus returns a RollupJobStatus. 
func NewRollupJobStatus() *RollupJobStatus { r := &RollupJobStatus{ - CurrentPosition: make(map[string]json.RawMessage, 0), + CurrentPosition: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/rollupjobsummary.go b/typedapi/types/rollupjobsummary.go index 174d69ae79..82236917b7 100644 --- a/typedapi/types/rollupjobsummary.go +++ b/typedapi/types/rollupjobsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupJobSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 type RollupJobSummary struct { Fields map[string][]RollupJobSummaryField `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -92,8 +92,10 @@ func (s *RollupJobSummary) UnmarshalJSON(data []byte) error { // NewRollupJobSummary returns a RollupJobSummary. func NewRollupJobSummary() *RollupJobSummary { r := &RollupJobSummary{ - Fields: make(map[string][]RollupJobSummaryField, 0), + Fields: make(map[string][]RollupJobSummaryField), } return r } + +// false diff --git a/typedapi/types/rollupjobsummaryfield.go b/typedapi/types/rollupjobsummaryfield.go index 13a1fc5843..d876ce360c 100644 --- a/typedapi/types/rollupjobsummaryfield.go +++ b/typedapi/types/rollupjobsummaryfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RollupJobSummaryField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 type RollupJobSummaryField struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -86,3 +86,5 @@ func NewRollupJobSummaryField() *RollupJobSummaryField { return r } + +// false diff --git a/typedapi/types/romaniananalyzer.go b/typedapi/types/romaniananalyzer.go index f67594fd5d..f63ea76249 100644 --- a/typedapi/types/romaniananalyzer.go +++ b/typedapi/types/romaniananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RomanianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L257-L262 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L268-L273 type RomanianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewRomanianAnalyzer() *RomanianAnalyzer { return r } + +// true + +type RomanianAnalyzerVariant interface { + RomanianAnalyzerCaster() *RomanianAnalyzer +} + +func (s *RomanianAnalyzer) RomanianAnalyzerCaster() *RomanianAnalyzer { + return s +} diff --git a/typedapi/types/routingfield.go b/typedapi/types/routingfield.go index b872df7a08..f5272d9d15 100644 --- a/typedapi/types/routingfield.go +++ b/typedapi/types/routingfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RoutingField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L50-L52 type RoutingField struct { Required bool `json:"required"` } @@ -76,3 +76,13 @@ func NewRoutingField() *RoutingField { return r } + +// true + +type RoutingFieldVariant interface { + RoutingFieldCaster() *RoutingField +} + +func (s *RoutingField) RoutingFieldCaster() *RoutingField { + return s +} diff --git a/typedapi/types/rrfrank.go b/typedapi/types/rrfrank.go index f9071ddafe..b4b70d9a90 100644 --- a/typedapi/types/rrfrank.go +++ b/typedapi/types/rrfrank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RrfRank type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Rank.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Rank.ts#L32-L37 type RrfRank struct { // RankConstant How much influence documents in individual result sets per query have over // the final ranked result set @@ -96,3 +96,13 @@ func NewRrfRank() *RrfRank { return r } + +// true + +type RrfRankVariant interface { + RrfRankCaster() *RrfRank +} + +func (s *RrfRank) RrfRankCaster() *RrfRank { + return s +} diff --git a/typedapi/types/rrfretriever.go b/typedapi/types/rrfretriever.go index 4d30643dcb..7d88068150 100644 --- a/typedapi/types/rrfretriever.go +++ b/typedapi/types/rrfretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RRFRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L79-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L84-L91 type RRFRetriever struct { // Filter Query to filter the documents that can match. 
Filter []Query `json:"filter,omitempty"` @@ -143,3 +143,13 @@ func NewRRFRetriever() *RRFRetriever { return r } + +// true + +type RRFRetrieverVariant interface { + RRFRetrieverCaster() *RRFRetriever +} + +func (s *RRFRetriever) RRFRetrieverCaster() *RRFRetriever { + return s +} diff --git a/typedapi/types/rulecondition.go b/typedapi/types/rulecondition.go index b65b3bfc43..5e77943c9b 100644 --- a/typedapi/types/rulecondition.go +++ b/typedapi/types/rulecondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // RuleCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Rule.ts#L52-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Rule.ts#L52-L65 type RuleCondition struct { // AppliesTo Specifies the result property to which the condition applies. If your // detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can @@ -99,3 +99,13 @@ func NewRuleCondition() *RuleCondition { return r } + +// true + +type RuleConditionVariant interface { + RuleConditionCaster() *RuleCondition +} + +func (s *RuleCondition) RuleConditionCaster() *RuleCondition { + return s +} diff --git a/typedapi/types/rulequery.go b/typedapi/types/rulequery.go index 5632bf6f7b..0a1b1318cd 100644 --- a/typedapi/types/rulequery.go +++ b/typedapi/types/rulequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RuleQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L398-L405 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L398-L405 type RuleQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -40,7 +40,7 @@ type RuleQuery struct { // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` - Organic *Query `json:"organic,omitempty"` + Organic Query `json:"organic"` QueryName_ *string `json:"_name,omitempty"` RulesetIds []string `json:"ruleset_ids"` } @@ -114,3 +114,13 @@ func NewRuleQuery() *RuleQuery { return r } + +// true + +type RuleQueryVariant interface { + RuleQueryCaster() *RuleQuery +} + +func (s *RuleQuery) RuleQueryCaster() *RuleQuery { + return s +} diff --git a/typedapi/types/ruleretriever.go b/typedapi/types/ruleretriever.go index 0c5900bea1..7272966143 100644 --- a/typedapi/types/ruleretriever.go +++ b/typedapi/types/ruleretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RuleRetriever type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L101-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L106-L115 type RuleRetriever struct { // Filter Query to filter the documents that can match. Filter []Query `json:"filter,omitempty"` @@ -44,7 +44,7 @@ type RuleRetriever struct { // RankWindowSize This value determines the size of the individual result set. RankWindowSize *int `json:"rank_window_size,omitempty"` // Retriever The retriever whose results rules should be applied to. - Retriever *RetrieverContainer `json:"retriever,omitempty"` + Retriever RetrieverContainer `json:"retriever"` // RulesetIds The ruleset IDs containing the rules this retriever is evaluating against. RulesetIds []string `json:"ruleset_ids"` } @@ -138,3 +138,13 @@ func NewRuleRetriever() *RuleRetriever { return r } + +// true + +type RuleRetrieverVariant interface { + RuleRetrieverCaster() *RuleRetriever +} + +func (s *RuleRetriever) RuleRetrieverCaster() *RuleRetriever { + return s +} diff --git a/typedapi/types/runningstatesearchinterval.go b/typedapi/types/runningstatesearchinterval.go index 903c25262e..e934f72c65 100644 --- a/typedapi/types/runningstatesearchinterval.go +++ b/typedapi/types/runningstatesearchinterval.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // RunningStateSearchInterval type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Datafeed.ts#L216-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Datafeed.ts#L226-L243 type RunningStateSearchInterval struct { // End The end time. End Duration `json:"end,omitempty"` @@ -88,3 +88,5 @@ func NewRunningStateSearchInterval() *RunningStateSearchInterval { return r } + +// false diff --git a/typedapi/types/runtimefield.go b/typedapi/types/runtimefield.go index 94a37b00e9..3c6523078c 100644 --- a/typedapi/types/runtimefield.go +++ b/typedapi/types/runtimefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // RuntimeField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/RuntimeFields.ts#L26-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/RuntimeFields.ts#L26-L50 type RuntimeField struct { // FetchFields For type `lookup` FetchFields []RuntimeFieldFetchFields `json:"fetch_fields,omitempty"` @@ -127,8 +127,18 @@ func (s *RuntimeField) UnmarshalJSON(data []byte) error { // NewRuntimeField returns a RuntimeField. 
func NewRuntimeField() *RuntimeField { r := &RuntimeField{ - Fields: make(map[string]CompositeSubField, 0), + Fields: make(map[string]CompositeSubField), } return r } + +// true + +type RuntimeFieldVariant interface { + RuntimeFieldCaster() *RuntimeField +} + +func (s *RuntimeField) RuntimeFieldCaster() *RuntimeField { + return s +} diff --git a/typedapi/types/runtimefieldfetchfields.go b/typedapi/types/runtimefieldfetchfields.go index 3fa1cd2bb1..710db09b32 100644 --- a/typedapi/types/runtimefieldfetchfields.go +++ b/typedapi/types/runtimefieldfetchfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RuntimeFieldFetchFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/RuntimeFields.ts#L56-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/RuntimeFields.ts#L56-L60 type RuntimeFieldFetchFields struct { Field string `json:"field"` Format *string `json:"format,omitempty"` @@ -92,3 +92,13 @@ func NewRuntimeFieldFetchFields() *RuntimeFieldFetchFields { return r } + +// true + +type RuntimeFieldFetchFieldsVariant interface { + RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields +} + +func (s *RuntimeFieldFetchFields) RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields { + return s +} diff --git a/typedapi/types/runtimefields.go b/typedapi/types/runtimefields.go index 583ca84935..a429e51b94 100644 --- a/typedapi/types/runtimefields.go +++ b/typedapi/types/runtimefields.go @@ -16,11 +16,15 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // RuntimeFields type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/RuntimeFields.ts#L24-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/RuntimeFields.ts#L24-L24 type RuntimeFields map[string]RuntimeField + +type RuntimeFieldsVariant interface { + RuntimeFieldsCaster() *RuntimeFields +} diff --git a/typedapi/types/runtimefieldstype.go b/typedapi/types/runtimefieldstype.go index 83c543bbc4..1400a7949c 100644 --- a/typedapi/types/runtimefieldstype.go +++ b/typedapi/types/runtimefieldstype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RuntimeFieldsType type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L277-L292 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L289-L304 type RuntimeFieldsType struct { CharsMax int64 `json:"chars_max"` CharsTotal int64 `json:"chars_total"` @@ -265,3 +265,5 @@ func NewRuntimeFieldsType() *RuntimeFieldsType { return r } + +// false diff --git a/typedapi/types/russiananalyzer.go b/typedapi/types/russiananalyzer.go index 1b19c96c96..fb94558415 100644 --- a/typedapi/types/russiananalyzer.go +++ b/typedapi/types/russiananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // RussianAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L264-L269 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L275-L280 type RussianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewRussianAnalyzer() *RussianAnalyzer { return r } + +// true + +type RussianAnalyzerVariant interface { + RussianAnalyzerCaster() *RussianAnalyzer +} + +func (s *RussianAnalyzer) RussianAnalyzerCaster() *RussianAnalyzer { + return s +} diff --git a/typedapi/types/s3repository.go b/typedapi/types/s3repository.go index 3766a661fe..15dc0cc1b4 100644 --- a/typedapi/types/s3repository.go +++ b/typedapi/types/s3repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,20 @@ import ( // S3Repository type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L64-L78 type S3Repository struct { + // Settings The repository settings. + // + // NOTE: In addition to the specified settings, you can also use all non-secure + // client settings in the repository settings. + // In this case, the client settings found in the repository settings will be + // merged with those of the named client used by the repository. 
+ // Conflicts between client and repository settings are resolved by the + // repository settings taking precedence over client settings. Settings S3RepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Type The S3 repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *S3Repository) UnmarshalJSON(data []byte) error { @@ -92,3 +101,13 @@ func NewS3Repository() *S3Repository { return r } + +// true + +type S3RepositoryVariant interface { + S3RepositoryCaster() *S3Repository +} + +func (s *S3Repository) S3RepositoryCaster() *S3Repository { + return s +} diff --git a/typedapi/types/s3repositorysettings.go b/typedapi/types/s3repositorysettings.go index d50b8d6baa..8b5fbbaad9 100644 --- a/typedapi/types/s3repositorysettings.go +++ b/typedapi/types/s3repositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,20 +31,126 @@ import ( // S3RepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L93-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L237-L346 type S3RepositorySettings struct { - BasePath *string `json:"base_path,omitempty"` - Bucket string `json:"bucket"` - BufferSize ByteSize `json:"buffer_size,omitempty"` - CannedAcl *string `json:"canned_acl,omitempty"` - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Client *string `json:"client,omitempty"` - Compress *bool `json:"compress,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // BasePath The path to the repository data within its bucket. + // It defaults to an empty string, meaning that the repository is at the root of + // the bucket. + // The value of this setting should not start or end with a forward slash (`/`). + // + // NOTE: Don't set base_path when configuring a snapshot repository for Elastic + // Cloud Enterprise. + // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments may share the same bucket. + BasePath *string `json:"base_path,omitempty"` + // Bucket The name of the S3 bucket to use for snapshots. + // The bucket name must adhere to Amazon's S3 bucket naming rules. + Bucket string `json:"bucket"` + // BufferSize The minimum threshold below which the chunk is uploaded using a single + // request. + // Beyond this threshold, the S3 repository will use the AWS Multipart Upload + // API to split the chunk into several parts, each of `buffer_size` length, and + // to upload each part in its own request. + // Note that setting a buffer size lower than 5mb is not allowed since it will + // prevent the use of the Multipart API and may result in upload errors. 
+ // It is also not possible to set a buffer size greater than 5gb as it is the + // maximum upload size allowed by S3. + // Defaults to `100mb` or 5% of JVM heap, whichever is smaller. + BufferSize ByteSize `json:"buffer_size,omitempty"` + // CannedAcl The S3 repository supports all S3 canned ACLs: `private`, `public-read`, + // `public-read-write`, `authenticated-read`, `log-delivery-write`, + // `bucket-owner-read`, `bucket-owner-full-control`. + // You could specify a canned ACL using the `canned_acl` setting. + // When the S3 repository creates buckets and objects, it adds the canned ACL + // into the buckets and objects. + CannedAcl *string `json:"canned_acl,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Client The name of the S3 client to use to connect to S3. + Client *string `json:"client,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // DeleteObjectsMaxSize The maxmimum batch size, between 1 and 1000, used for `DeleteObjects` + // requests. + // Defaults to 1000 which is the maximum number supported by the AWS + // DeleteObjects API. 
+ DeleteObjectsMaxSize *int `json:"delete_objects_max_size,omitempty"` + // GetRegisterRetryDelay The time to wait before trying again if an attempt to read a linearizable + // register fails. + GetRegisterRetryDelay Duration `json:"get_register_retry_delay,omitempty"` + // MaxMultipartParts The maximum number of parts that Elasticsearch will write during a multipart + // upload of a single object. + // Files which are larger than `buffer_size × max_multipart_parts` will be + // chunked into several smaller objects. + // Elasticsearch may also split a file across multiple objects to satisfy other + // constraints such as the `chunk_size` limit. + // Defaults to `10000` which is the maximum number of parts in a multipart + // upload in AWS S3. + MaxMultipartParts *int `json:"max_multipart_parts,omitempty"` + // MaxMultipartUploadCleanupSize The maximum number of possibly-dangling multipart uploads to clean up in each + // batch of snapshot deletions. + // Defaults to 1000 which is the maximum number supported by the AWS + // ListMultipartUploads API. + // If set to `0`, Elasticsearch will not attempt to clean up dangling multipart + // uploads. + MaxMultipartUploadCleanupSize *int `json:"max_multipart_upload_cleanup_size,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. 
MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - Readonly *bool `json:"readonly,omitempty"` - ServerSideEncryption *bool `json:"server_side_encryption,omitempty"` - StorageClass *string `json:"storage_class,omitempty"` + // Readonly If true, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. + Readonly *bool `json:"readonly,omitempty"` + // ServerSideEncryption When set to `true`, files are encrypted on server side using an AES256 + // algorithm. + ServerSideEncryption *bool `json:"server_side_encryption,omitempty"` + // StorageClass The S3 storage class for objects written to the repository. + // Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, + // and `intelligent_tiering`. + StorageClass *string `json:"storage_class,omitempty"` + // ThrottledDeleteRetryDelayIncrement The delay before the first retry and the amount the delay is incremented by + // on each subsequent retry. + // The default is 50ms and the minimum is 0ms. + ThrottledDeleteRetryDelayIncrement Duration `json:"throttled_delete_retry.delay_increment,omitempty"` + // ThrottledDeleteRetryMaximumDelay The upper bound on how long the delays between retries will grow to. + // The default is 5s and the minimum is 0ms. 
+ ThrottledDeleteRetryMaximumDelay Duration `json:"throttled_delete_retry.maximum_delay,omitempty"` + // ThrottledDeleteRetryMaximumNumberOfRetries The number times to retry a throttled snapshot deletion. + // The default is 10 and the minimum value is 0 which will disable retries + // altogether. + // Note that if retries are enabled in the Azure client, each of these retries + // comprises that many client-level retries. + ThrottledDeleteRetryMaximumNumberOfRetries *int `json:"throttled_delete_retry.maximum_number_of_retries,omitempty"` } func (s *S3RepositorySettings) UnmarshalJSON(data []byte) error { @@ -134,6 +240,59 @@ func (s *S3RepositorySettings) UnmarshalJSON(data []byte) error { s.Compress = &v } + case "delete_objects_max_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DeleteObjectsMaxSize", err) + } + s.DeleteObjectsMaxSize = &value + case float64: + f := int(v) + s.DeleteObjectsMaxSize = &f + } + + case "get_register_retry_delay": + if err := dec.Decode(&s.GetRegisterRetryDelay); err != nil { + return fmt.Errorf("%s | %w", "GetRegisterRetryDelay", err) + } + + case "max_multipart_parts": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxMultipartParts", err) + } + s.MaxMultipartParts = &value + case float64: + f := int(v) + s.MaxMultipartParts = &f + } + + case "max_multipart_upload_cleanup_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxMultipartUploadCleanupSize", err) + } + s.MaxMultipartUploadCleanupSize = &value + case float64: + f := int(v) + s.MaxMultipartUploadCleanupSize = &f + } + case "max_restore_bytes_per_sec": if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { return fmt.Errorf("%s 
| %w", "MaxRestoreBytesPerSec", err) @@ -184,6 +343,32 @@ func (s *S3RepositorySettings) UnmarshalJSON(data []byte) error { } s.StorageClass = &o + case "throttled_delete_retry.delay_increment": + if err := dec.Decode(&s.ThrottledDeleteRetryDelayIncrement); err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryDelayIncrement", err) + } + + case "throttled_delete_retry.maximum_delay": + if err := dec.Decode(&s.ThrottledDeleteRetryMaximumDelay); err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryMaximumDelay", err) + } + + case "throttled_delete_retry.maximum_number_of_retries": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryMaximumNumberOfRetries", err) + } + s.ThrottledDeleteRetryMaximumNumberOfRetries = &value + case float64: + f := int(v) + s.ThrottledDeleteRetryMaximumNumberOfRetries = &f + } + } } return nil @@ -195,3 +380,13 @@ func NewS3RepositorySettings() *S3RepositorySettings { return r } + +// true + +type S3RepositorySettingsVariant interface { + S3RepositorySettingsCaster() *S3RepositorySettings +} + +func (s *S3RepositorySettings) S3RepositorySettingsCaster() *S3RepositorySettings { + return s +} diff --git a/typedapi/types/samplediversity.go b/typedapi/types/samplediversity.go index 78003fb4d4..f6924310c6 100644 --- a/typedapi/types/samplediversity.go +++ b/typedapi/types/samplediversity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SampleDiversity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/ExploreControls.ts#L51-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/ExploreControls.ts#L51-L54 type SampleDiversity struct { Field string `json:"field"` MaxDocsPerValue int `json:"max_docs_per_value"` @@ -84,3 +84,13 @@ func NewSampleDiversity() *SampleDiversity { return r } + +// true + +type SampleDiversityVariant interface { + SampleDiversityCaster() *SampleDiversity +} + +func (s *SampleDiversity) SampleDiversityCaster() *SampleDiversity { + return s +} diff --git a/typedapi/types/sampleraggregate.go b/typedapi/types/sampleraggregate.go index 960d50903f..63b1284556 100644 --- a/typedapi/types/sampleraggregate.go +++ b/typedapi/types/sampleraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // SamplerAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L558-L559 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L558-L559 type SamplerAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s SamplerAggregate) MarshalJSON() ([]byte, error) { // NewSamplerAggregate returns a SamplerAggregate. 
func NewSamplerAggregate() *SamplerAggregate { r := &SamplerAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/sampleraggregation.go b/typedapi/types/sampleraggregation.go index f79e96a5e8..8887f3c193 100644 --- a/typedapi/types/sampleraggregation.go +++ b/typedapi/types/sampleraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SamplerAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L771-L780 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L771-L780 type SamplerAggregation struct { // ShardSize Limits how many top-scoring documents are collected in the sample processed // on each shard. @@ -80,3 +80,13 @@ func NewSamplerAggregation() *SamplerAggregation { return r } + +// true + +type SamplerAggregationVariant interface { + SamplerAggregationCaster() *SamplerAggregation +} + +func (s *SamplerAggregation) SamplerAggregationCaster() *SamplerAggregation { + return s +} diff --git a/typedapi/types/scalarvalue.go b/typedapi/types/scalarvalue.go index 4005e76308..3038426a4d 100644 --- a/typedapi/types/scalarvalue.go +++ b/typedapi/types/scalarvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,5 +28,9 @@ package types // bool // nil // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L33-L37 type ScalarValue any + +type ScalarValueVariant interface { + ScalarValueCaster() *ScalarValue +} diff --git a/typedapi/types/scaledfloatnumberproperty.go b/typedapi/types/scaledfloatnumberproperty.go index c39e15b7c9..5c3812db4b 100644 --- a/typedapi/types/scaledfloatnumberproperty.go +++ b/typedapi/types/scaledfloatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ScaledFloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L182-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L186-L190 type ScaledFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,14 @@ type ScaledFloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - ScalingFactor *Float64 `json:"scaling_factor,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + ScalingFactor *Float64 `json:"scaling_factor,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +164,313 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -554,301 +568,313 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -889,6 +915,11 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -938,6 +969,7 @@ func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { ScalingFactor: s.ScalingFactor, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -951,10 +983,20 @@ func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { // NewScaledFloatNumberProperty returns a ScaledFloatNumberProperty. func NewScaledFloatNumberProperty() *ScaledFloatNumberProperty { r := &ScaledFloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ScaledFloatNumberPropertyVariant interface { + ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty +} + +func (s *ScaledFloatNumberProperty) ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty { + return s +} diff --git a/typedapi/types/schedulecontainer.go b/typedapi/types/schedulecontainer.go index 74a8496225..f922f68e95 100644 --- a/typedapi/types/schedulecontainer.go +++ b/typedapi/types/schedulecontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,19 +26,22 @@ import ( "errors" "fmt" "io" + "strconv" ) // ScheduleContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L80-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L80-L92 type ScheduleContainer struct { - Cron *string `json:"cron,omitempty"` - Daily *DailySchedule `json:"daily,omitempty"` - Hourly *HourlySchedule `json:"hourly,omitempty"` - Interval Duration `json:"interval,omitempty"` - Monthly []TimeOfMonth `json:"monthly,omitempty"` - Weekly []TimeOfWeek `json:"weekly,omitempty"` - Yearly []TimeOfYear `json:"yearly,omitempty"` + AdditionalScheduleContainerProperty map[string]json.RawMessage `json:"-"` + Cron *string `json:"cron,omitempty"` + Daily *DailySchedule `json:"daily,omitempty"` + Hourly *HourlySchedule `json:"hourly,omitempty"` + Interval Duration `json:"interval,omitempty"` + Monthly []TimeOfMonth `json:"monthly,omitempty"` + Timezone *string `json:"timezone,omitempty"` + Weekly []TimeOfWeek `json:"weekly,omitempty"` + Yearly []TimeOfYear `json:"yearly,omitempty"` } func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { @@ -92,6 +95,18 @@ func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { } } + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timezone", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + case "weekly": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) @@ -124,14 +139,68 @@ func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { 
} } + default: + + if key, ok := t.(string); ok { + if s.AdditionalScheduleContainerProperty == nil { + s.AdditionalScheduleContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalScheduleContainerProperty", err) + } + s.AdditionalScheduleContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ScheduleContainer) MarshalJSON() ([]byte, error) { + type opt ScheduleContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalScheduleContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalScheduleContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewScheduleContainer returns a ScheduleContainer. func NewScheduleContainer() *ScheduleContainer { - r := &ScheduleContainer{} + r := &ScheduleContainer{ + AdditionalScheduleContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type ScheduleContainerVariant interface { + ScheduleContainerCaster() *ScheduleContainer +} + +func (s *ScheduleContainer) ScheduleContainerCaster() *ScheduleContainer { + return s +} diff --git a/typedapi/types/scheduletimeofday.go b/typedapi/types/scheduletimeofday.go index 76ed5c156d..de8b026427 100644 --- a/typedapi/types/scheduletimeofday.go +++ b/typedapi/types/scheduletimeofday.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // HourAndMinute // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L98-L103 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L99-L104 type ScheduleTimeOfDay any + +type ScheduleTimeOfDayVariant interface { + ScheduleTimeOfDayCaster() *ScheduleTimeOfDay +} diff --git a/typedapi/types/scheduletriggerevent.go b/typedapi/types/scheduletriggerevent.go index 9371588a3c..62f52a1c1a 100644 --- a/typedapi/types/scheduletriggerevent.go +++ b/typedapi/types/scheduletriggerevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ScheduleTriggerEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L93-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L94-L97 type ScheduleTriggerEvent struct { ScheduledTime DateTime `json:"scheduled_time"` TriggeredTime DateTime `json:"triggered_time,omitempty"` @@ -72,3 +72,13 @@ func NewScheduleTriggerEvent() *ScheduleTriggerEvent { return r } + +// true + +type ScheduleTriggerEventVariant interface { + ScheduleTriggerEventCaster() *ScheduleTriggerEvent +} + +func (s *ScheduleTriggerEvent) ScheduleTriggerEventCaster() *ScheduleTriggerEvent { + return s +} diff --git a/typedapi/types/schedulingconfiguration.go b/typedapi/types/schedulingconfiguration.go index 9b41d668f3..8de5d213ee 100644 --- a/typedapi/types/schedulingconfiguration.go +++ b/typedapi/types/schedulingconfiguration.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SchedulingConfiguration type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L231-L235 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L246-L250 type SchedulingConfiguration struct { AccessControl *ConnectorScheduling `json:"access_control,omitempty"` Full *ConnectorScheduling `json:"full,omitempty"` @@ -35,3 +35,13 @@ func NewSchedulingConfiguration() *SchedulingConfiguration { return r } + +// true + +type SchedulingConfigurationVariant interface { + SchedulingConfigurationCaster() *SchedulingConfiguration +} + +func (s *SchedulingConfiguration) SchedulingConfigurationCaster() *SchedulingConfiguration { + return s +} diff --git a/typedapi/types/scoresort.go b/typedapi/types/scoresort.go index 940931e74b..55fffaf626 100644 --- a/typedapi/types/scoresort.go +++ b/typedapi/types/scoresort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // ScoreSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L54-L56 type ScoreSort struct { Order *sortorder.SortOrder `json:"order,omitempty"` } @@ -37,3 +37,13 @@ func NewScoreSort() *ScoreSort { return r } + +// true + +type ScoreSortVariant interface { + ScoreSortCaster() *ScoreSort +} + +func (s *ScoreSort) ScoreSortCaster() *ScoreSort { + return s +} diff --git a/typedapi/types/script.go b/typedapi/types/script.go index 81ac443aae..28540550fd 100644 --- a/typedapi/types/script.go +++ b/typedapi/types/script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Script type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Scripting.ts#L73-L97 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Scripting.ts#L75-L99 type Script struct { // Id The `id` for a stored script. Id *string `json:"id,omitempty"` @@ -120,9 +120,19 @@ func (s *Script) UnmarshalJSON(data []byte) error { // NewScript returns a Script. 
func NewScript() *Script { r := &Script{ - Options: make(map[string]string, 0), - Params: make(map[string]json.RawMessage, 0), + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptVariant interface { + ScriptCaster() *Script +} + +func (s *Script) ScriptCaster() *Script { + return s +} diff --git a/typedapi/types/scriptcache.go b/typedapi/types/scriptcache.go index 2f9bba1a73..a0238ca923 100644 --- a/typedapi/types/scriptcache.go +++ b/typedapi/types/scriptcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1102-L1116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1102-L1116 type ScriptCache struct { // CacheEvictions Total number of times the script cache has evicted old data. CacheEvictions *int64 `json:"cache_evictions,omitempty"` @@ -126,3 +126,5 @@ func NewScriptCache() *ScriptCache { return r } + +// false diff --git a/typedapi/types/scriptcondition.go b/typedapi/types/scriptcondition.go index 0bd09518d1..6dd526a8cc 100644 --- a/typedapi/types/scriptcondition.go +++ b/typedapi/types/scriptcondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L79-L87 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L79-L87 type ScriptCondition struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -106,8 +106,18 @@ func (s *ScriptCondition) UnmarshalJSON(data []byte) error { // NewScriptCondition returns a ScriptCondition. func NewScriptCondition() *ScriptCondition { r := &ScriptCondition{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptConditionVariant interface { + ScriptConditionCaster() *ScriptCondition +} + +func (s *ScriptCondition) ScriptConditionCaster() *ScriptCondition { + return s +} diff --git a/typedapi/types/scriptedheuristic.go b/typedapi/types/scriptedheuristic.go index de904dfb10..2a436c7ecc 100644 --- a/typedapi/types/scriptedheuristic.go +++ b/typedapi/types/scriptedheuristic.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ScriptedHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L813-L815 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L813-L815 type ScriptedHeuristic struct { Script Script `json:"script"` } @@ -33,3 +33,13 @@ func NewScriptedHeuristic() *ScriptedHeuristic { return r } + +// true + +type ScriptedHeuristicVariant interface { + ScriptedHeuristicCaster() *ScriptedHeuristic +} + +func (s *ScriptedHeuristic) ScriptedHeuristicCaster() *ScriptedHeuristic { + return s +} diff --git a/typedapi/types/scriptedmetricaggregate.go b/typedapi/types/scriptedmetricaggregate.go index e9b39f3203..ca89685255 100644 --- a/typedapi/types/scriptedmetricaggregate.go +++ b/typedapi/types/scriptedmetricaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ScriptedMetricAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L739-L745 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L739-L745 type ScriptedMetricAggregate struct { Meta Metadata `json:"meta,omitempty"` Value json.RawMessage `json:"value,omitempty"` @@ -72,3 +72,5 @@ func NewScriptedMetricAggregate() *ScriptedMetricAggregate { return r } + +// false diff --git a/typedapi/types/scriptedmetricaggregation.go b/typedapi/types/scriptedmetricaggregation.go index 53320c36ff..3062c2187a 100644 --- a/typedapi/types/scriptedmetricaggregation.go +++ b/typedapi/types/scriptedmetricaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ScriptedMetricAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L263-L289 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L263-L289 type ScriptedMetricAggregation struct { // CombineScript Runs once on each shard after document collection is complete. // Allows the aggregation to consolidate the state returned from each shard. @@ -125,8 +125,18 @@ func (s *ScriptedMetricAggregation) UnmarshalJSON(data []byte) error { // NewScriptedMetricAggregation returns a ScriptedMetricAggregation. 
func NewScriptedMetricAggregation() *ScriptedMetricAggregation { r := &ScriptedMetricAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptedMetricAggregationVariant interface { + ScriptedMetricAggregationCaster() *ScriptedMetricAggregation +} + +func (s *ScriptedMetricAggregation) ScriptedMetricAggregationCaster() *ScriptedMetricAggregation { + return s +} diff --git a/typedapi/types/scriptfield.go b/typedapi/types/scriptfield.go index b02e359117..4017d54c40 100644 --- a/typedapi/types/scriptfield.go +++ b/typedapi/types/scriptfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Scripting.ts#L99-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Scripting.ts#L101-L104 type ScriptField struct { IgnoreFailure *bool `json:"ignore_failure,omitempty"` Script Script `json:"script"` @@ -82,3 +82,13 @@ func NewScriptField() *ScriptField { return r } + +// true + +type ScriptFieldVariant interface { + ScriptFieldCaster() *ScriptField +} + +func (s *ScriptField) ScriptFieldCaster() *ScriptField { + return s +} diff --git a/typedapi/types/scripting.go b/typedapi/types/scripting.go index ceaadbbdb4..305606b102 100644 --- a/typedapi/types/scripting.go +++ b/typedapi/types/scripting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Scripting type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1048-L1066 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1048-L1066 type Scripting struct { // CacheEvictions Total number of times the script cache has evicted old data. CacheEvictions *int64 `json:"cache_evictions,omitempty"` @@ -126,8 +126,10 @@ func (s *Scripting) UnmarshalJSON(data []byte) error { // NewScripting returns a Scripting. func NewScripting() *Scripting { r := &Scripting{ - CompilationsHistory: make(map[string]int64, 0), + CompilationsHistory: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/scriptprocessor.go b/typedapi/types/scriptprocessor.go index 34af0ea834..b7fd791c4e 100644 --- a/typedapi/types/scriptprocessor.go +++ b/typedapi/types/scriptprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1364-L1384 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1405-L1425 type ScriptProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -40,7 +40,7 @@ type ScriptProcessor struct { // If no `source` is specified, this parameter is required. Id *string `json:"id,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // Lang Script language. @@ -90,16 +90,9 @@ func (s *ScriptProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -172,8 +165,18 @@ func (s *ScriptProcessor) UnmarshalJSON(data []byte) error { // NewScriptProcessor returns a ScriptProcessor. func NewScriptProcessor() *ScriptProcessor { r := &ScriptProcessor{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptProcessorVariant interface { + ScriptProcessorCaster() *ScriptProcessor +} + +func (s *ScriptProcessor) ScriptProcessorCaster() *ScriptProcessor { + return s +} diff --git a/typedapi/types/scriptquery.go b/typedapi/types/scriptquery.go index 58ccd8992d..bec9879454 100644 --- a/typedapi/types/scriptquery.go +++ b/typedapi/types/scriptquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L337-L346 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L337-L346 type ScriptQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -104,3 +104,13 @@ func NewScriptQuery() *ScriptQuery { return r } + +// true + +type ScriptQueryVariant interface { + ScriptQueryCaster() *ScriptQuery +} + +func (s *ScriptQuery) ScriptQueryCaster() *ScriptQuery { + return s +} diff --git a/typedapi/types/scriptscorefunction.go b/typedapi/types/scriptscorefunction.go index d1c65cc603..5339ad0ff4 100644 --- a/typedapi/types/scriptscorefunction.go +++ b/typedapi/types/scriptscorefunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ScriptScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L137-L142 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L137-L142 type ScriptScoreFunction struct { // Script A script that computes a score. 
Script Script `json:"script"` @@ -34,3 +34,13 @@ func NewScriptScoreFunction() *ScriptScoreFunction { return r } + +// true + +type ScriptScoreFunctionVariant interface { + ScriptScoreFunctionCaster() *ScriptScoreFunction +} + +func (s *ScriptScoreFunction) ScriptScoreFunctionCaster() *ScriptScoreFunction { + return s +} diff --git a/typedapi/types/scriptscorequery.go b/typedapi/types/scriptscorequery.go index 9050ab7aa5..9bc327165e 100644 --- a/typedapi/types/scriptscorequery.go +++ b/typedapi/types/scriptscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L348-L365 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L348-L365 type ScriptScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -43,7 +43,7 @@ type ScriptScoreQuery struct { // from the search results. MinScore *float32 `json:"min_score,omitempty"` // Query Query used to return documents. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // Script Script used to compute the score of documents returned by the query. 
// Important: final relevance scores from the `script_score` query cannot be @@ -131,3 +131,13 @@ func NewScriptScoreQuery() *ScriptScoreQuery { return r } + +// true + +type ScriptScoreQueryVariant interface { + ScriptScoreQueryCaster() *ScriptScoreQuery +} + +func (s *ScriptScoreQuery) ScriptScoreQueryCaster() *ScriptScoreQuery { + return s +} diff --git a/typedapi/types/scriptsort.go b/typedapi/types/scriptsort.go index f408288e86..f81d37e9d5 100644 --- a/typedapi/types/scriptsort.go +++ b/typedapi/types/scriptsort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,7 +28,7 @@ import ( // ScriptSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L72-L78 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L72-L78 type ScriptSort struct { Mode *sortmode.SortMode `json:"mode,omitempty"` Nested *NestedSortValue `json:"nested,omitempty"` @@ -43,3 +43,13 @@ func NewScriptSort() *ScriptSort { return r } + +// true + +type ScriptSortVariant interface { + ScriptSortCaster() *ScriptSort +} + +func (s *ScriptSort) ScriptSortCaster() *ScriptSort { + return s +} diff --git a/typedapi/types/scripttransform.go b/typedapi/types/scripttransform.go index 664884a296..fb603bc79d 100644 --- a/typedapi/types/scripttransform.go +++ b/typedapi/types/scripttransform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ScriptTransform type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Transform.ts#L36-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Transform.ts#L36-L44 type ScriptTransform struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -106,8 +106,18 @@ func (s *ScriptTransform) UnmarshalJSON(data []byte) error { // NewScriptTransform returns a ScriptTransform. func NewScriptTransform() *ScriptTransform { r := &ScriptTransform{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptTransformVariant interface { + ScriptTransformCaster() *ScriptTransform +} + +func (s *ScriptTransform) ScriptTransformCaster() *ScriptTransform { + return s +} diff --git a/typedapi/types/scrollids.go b/typedapi/types/scrollids.go index 920866633b..fe9d36134d 100644 --- a/typedapi/types/scrollids.go +++ b/typedapi/types/scrollids.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ScrollIds type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L56-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L50-L50 type ScrollIds []string + +type ScrollIdsVariant interface { + ScrollIdsCaster() *ScrollIds +} diff --git a/typedapi/types/searchablesnapshotaction.go b/typedapi/types/searchablesnapshotaction.go index 96b1e4d1bb..6e9885c211 100644 --- a/typedapi/types/searchablesnapshotaction.go +++ b/typedapi/types/searchablesnapshotaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchableSnapshotAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L131-L134 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L128-L131 type SearchableSnapshotAction struct { ForceMergeIndex *bool `json:"force_merge_index,omitempty"` SnapshotRepository string `json:"snapshot_repository"` @@ -89,3 +89,13 @@ func NewSearchableSnapshotAction() *SearchableSnapshotAction { return r } + +// true + +type SearchableSnapshotActionVariant interface { + SearchableSnapshotActionCaster() *SearchableSnapshotAction +} + +func (s *SearchableSnapshotAction) SearchableSnapshotActionCaster() *SearchableSnapshotAction { + return s +} diff --git a/typedapi/types/searchablesnapshots.go b/typedapi/types/searchablesnapshots.go index 5a31fa04dc..dec7457fc0 100644 --- a/typedapi/types/searchablesnapshots.go +++ b/typedapi/types/searchablesnapshots.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchableSnapshots type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L426-L430 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L434-L438 type SearchableSnapshots struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -142,3 +142,5 @@ func NewSearchableSnapshots() *SearchableSnapshots { return r } + +// false diff --git a/typedapi/types/searchaccess.go b/typedapi/types/searchaccess.go index 597b205f16..7b40b39560 100644 --- a/typedapi/types/searchaccess.go +++ b/typedapi/types/searchaccess.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchAccess type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L387-L407 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L454-L474 type SearchAccess struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. 
Implicitly, restricted indices have limited @@ -86,8 +86,19 @@ func (s *SearchAccess) UnmarshalJSON(data []byte) error { } case "names": - if err := dec.Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } } case "query": @@ -108,7 +119,7 @@ func (s *SearchAccess) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", 
"multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -146,3 +157,13 @@ func NewSearchAccess() *SearchAccess { return r } + +// true + +type SearchAccessVariant interface { + SearchAccessCaster() *SearchAccess +} + +func (s *SearchAccess) SearchAccessCaster() *SearchAccess { + return s +} diff --git a/typedapi/types/searchapplication.go b/typedapi/types/searchapplication.go index 12cd0a8554..40f2c32f1e 100644 --- a/typedapi/types/searchapplication.go +++ b/typedapi/types/searchapplication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SearchApplication type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/_types/SearchApplication.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/SearchApplication.ts#L24-L33 type SearchApplication struct { // AnalyticsCollectionName Analytics collection associated to the Search Application. 
AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` @@ -95,3 +95,5 @@ func NewSearchApplication() *SearchApplication { return r } + +// false diff --git a/typedapi/types/searchapplicationparameters.go b/typedapi/types/searchapplicationparameters.go index 319cf1157e..6f203908c8 100644 --- a/typedapi/types/searchapplicationparameters.go +++ b/typedapi/types/searchapplicationparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SearchApplicationParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/_types/SearchApplicationParameters.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/SearchApplicationParameters.ts#L23-L36 type SearchApplicationParameters struct { // AnalyticsCollectionName Analytics collection associated to the Search Application. 
AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` @@ -81,3 +81,13 @@ func NewSearchApplicationParameters() *SearchApplicationParameters { return r } + +// true + +type SearchApplicationParametersVariant interface { + SearchApplicationParametersCaster() *SearchApplicationParameters +} + +func (s *SearchApplicationParameters) SearchApplicationParametersCaster() *SearchApplicationParameters { + return s +} diff --git a/typedapi/types/searchapplicationtemplate.go b/typedapi/types/searchapplicationtemplate.go index 01e38f4382..f7441ae000 100644 --- a/typedapi/types/searchapplicationtemplate.go +++ b/typedapi/types/searchapplicationtemplate.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SearchApplicationTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/search_application/_types/SearchApplicationTemplate.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/search_application/_types/SearchApplicationTemplate.ts#L22-L27 type SearchApplicationTemplate struct { // Script The associated mustache template. 
Script Script `json:"script"` @@ -34,3 +34,13 @@ func NewSearchApplicationTemplate() *SearchApplicationTemplate { return r } + +// true + +type SearchApplicationTemplateVariant interface { + SearchApplicationTemplateCaster() *SearchApplicationTemplate +} + +func (s *SearchApplicationTemplate) SearchApplicationTemplateCaster() *SearchApplicationTemplate { + return s +} diff --git a/typedapi/types/searchasyoutypeproperty.go b/typedapi/types/searchasyoutypeproperty.go index 48fc53a177..01afb60974 100644 --- a/typedapi/types/searchasyoutypeproperty.go +++ b/typedapi/types/searchasyoutypeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // SearchAsYouTypeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L212-L223 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L228-L239 type SearchAsYouTypeProperty struct { Analyzer *string `json:"analyzer,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -46,15 +47,16 @@ type SearchAsYouTypeProperty struct { IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` MaxShingleSize *int `json:"max_shingle_size,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` } func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { @@ -124,301 +126,313 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -516,301 +530,313 
@@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := 
NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -866,6 +892,11 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -900,6 +931,7 @@ func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, Type: s.Type, } @@ -912,10 +944,20 @@ func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { // NewSearchAsYouTypeProperty returns a SearchAsYouTypeProperty. 
func NewSearchAsYouTypeProperty() *SearchAsYouTypeProperty { r := &SearchAsYouTypeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type SearchAsYouTypePropertyVariant interface { + SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty +} + +func (s *SearchAsYouTypeProperty) SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty { + return s +} diff --git a/typedapi/types/searchidle.go b/typedapi/types/searchidle.go index f1f1439792..db817592e6 100644 --- a/typedapi/types/searchidle.go +++ b/typedapi/types/searchidle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SearchIdle type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L245-L248 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L253-L256 type SearchIdle struct { After Duration `json:"after,omitempty"` } @@ -66,3 +66,13 @@ func NewSearchIdle() *SearchIdle { return r } + +// true + +type SearchIdleVariant interface { + SearchIdleCaster() *SearchIdle +} + +func (s *SearchIdle) SearchIdleCaster() *SearchIdle { + return s +} diff --git a/typedapi/types/searchinput.go b/typedapi/types/searchinput.go index ef5115a244..0b0d0a7d81 100644 --- a/typedapi/types/searchinput.go +++ b/typedapi/types/searchinput.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SearchInput type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L112-L116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L112-L116 type SearchInput struct { Extract []string `json:"extract,omitempty"` Request SearchInputRequestDefinition `json:"request"` @@ -78,3 +78,13 @@ func NewSearchInput() *SearchInput { return r } + +// true + +type SearchInputVariant interface { + SearchInputCaster() *SearchInput +} + +func (s *SearchInput) SearchInputCaster() *SearchInput { + return s +} diff --git a/typedapi/types/searchinputrequestbody.go b/typedapi/types/searchinputrequestbody.go index 9f7e9c626f..8c9eb3a368 100644 --- a/typedapi/types/searchinputrequestbody.go +++ b/typedapi/types/searchinputrequestbody.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SearchInputRequestBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L147-L149 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L147-L149 type SearchInputRequestBody struct { Query Query `json:"query"` } @@ -33,3 +33,13 @@ func NewSearchInputRequestBody() *SearchInputRequestBody { return r } + +// true + +type SearchInputRequestBodyVariant interface { + SearchInputRequestBodyCaster() *SearchInputRequestBody +} + +func (s *SearchInputRequestBody) SearchInputRequestBodyCaster() *SearchInputRequestBody { + return s +} diff --git a/typedapi/types/searchinputrequestdefinition.go b/typedapi/types/searchinputrequestdefinition.go index 64d4bba95a..ce5a81aa02 100644 --- a/typedapi/types/searchinputrequestdefinition.go +++ b/typedapi/types/searchinputrequestdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SearchInputRequestDefinition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L118-L125 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L118-L125 type SearchInputRequestDefinition struct { Body *SearchInputRequestBody `json:"body,omitempty"` Indices []string `json:"indices,omitempty"` @@ -108,3 +108,13 @@ func NewSearchInputRequestDefinition() *SearchInputRequestDefinition { return r } + +// true + +type SearchInputRequestDefinitionVariant interface { + SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition +} + +func (s *SearchInputRequestDefinition) SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition { + return s +} diff --git a/typedapi/types/searchprofile.go b/typedapi/types/searchprofile.go index 67c47cb31f..88b39b50ac 100644 --- a/typedapi/types/searchprofile.go +++ b/typedapi/types/searchprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L136-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L136-L140 type SearchProfile struct { Collector []Collector `json:"collector"` Query []QueryProfile `json:"query"` @@ -89,3 +89,5 @@ func NewSearchProfile() *SearchProfile { return r } + +// false diff --git a/typedapi/types/searchshardsnodeattributes.go b/typedapi/types/searchshardsnodeattributes.go index 6fc8d20b40..5a2b9d7315 100644 --- a/typedapi/types/searchshardsnodeattributes.go +++ b/typedapi/types/searchshardsnodeattributes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SearchShardsNodeAttributes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_shards/SearchShardsResponse.ts#L42-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_shards/SearchShardsResponse.ts#L42-L60 type SearchShardsNodeAttributes struct { // Attributes Lists node attributes. Attributes map[string]string `json:"attributes"` @@ -150,8 +150,10 @@ func (s *SearchShardsNodeAttributes) UnmarshalJSON(data []byte) error { // NewSearchShardsNodeAttributes returns a SearchShardsNodeAttributes. 
func NewSearchShardsNodeAttributes() *SearchShardsNodeAttributes { r := &SearchShardsNodeAttributes{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/searchstats.go b/typedapi/types/searchstats.go index 916da097cf..30682262e7 100644 --- a/typedapi/types/searchstats.go +++ b/typedapi/types/searchstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L252-L271 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L255-L274 type SearchStats struct { FetchCurrent int64 `json:"fetch_current"` FetchTime Duration `json:"fetch_time,omitempty"` @@ -259,8 +259,10 @@ func (s *SearchStats) UnmarshalJSON(data []byte) error { // NewSearchStats returns a SearchStats. func NewSearchStats() *SearchStats { r := &SearchStats{ - Groups: make(map[string]SearchStats, 0), + Groups: make(map[string]SearchStats), } return r } + +// false diff --git a/typedapi/types/searchtemplaterequestbody.go b/typedapi/types/searchtemplaterequestbody.go index 3a39a7ed37..9102845daa 100644 --- a/typedapi/types/searchtemplaterequestbody.go +++ b/typedapi/types/searchtemplaterequestbody.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SearchTemplateRequestBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L128-L145 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L128-L145 type SearchTemplateRequestBody struct { Explain *bool `json:"explain,omitempty"` // Id ID of the search template to use. If no source is specified, @@ -121,8 +121,18 @@ func (s *SearchTemplateRequestBody) UnmarshalJSON(data []byte) error { // NewSearchTemplateRequestBody returns a SearchTemplateRequestBody. func NewSearchTemplateRequestBody() *SearchTemplateRequestBody { r := &SearchTemplateRequestBody{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type SearchTemplateRequestBodyVariant interface { + SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody +} + +func (s *SearchTemplateRequestBody) SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody { + return s +} diff --git a/typedapi/types/searchtransform.go b/typedapi/types/searchtransform.go index e4a77cb5b8..f6b6e8d0d8 100644 --- a/typedapi/types/searchtransform.go +++ b/typedapi/types/searchtransform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SearchTransform type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Transform.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Transform.ts#L46-L49 type SearchTransform struct { Request SearchInputRequestDefinition `json:"request"` Timeout Duration `json:"timeout"` @@ -72,3 +72,13 @@ func NewSearchTransform() *SearchTransform { return r } + +// true + +type SearchTransformVariant interface { + SearchTransformCaster() *SearchTransform +} + +func (s *SearchTransform) SearchTransformCaster() *SearchTransform { + return s +} diff --git a/typedapi/types/security.go b/typedapi/types/security.go index 7f53ac7f8d..430ce16021 100644 --- a/typedapi/types/security.go +++ b/typedapi/types/security.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Security type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L432-L445 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L440-L453 type Security struct { Anonymous FeatureToggle `json:"anonymous"` ApiKeyService FeatureToggle `json:"api_key_service"` @@ -166,9 +166,11 @@ func (s *Security) UnmarshalJSON(data []byte) error { // NewSecurity returns a Security. 
func NewSecurity() *Security { r := &Security{ - Realms: make(map[string]XpackRealm, 0), - RoleMapping: make(map[string]XpackRoleMapping, 0), + Realms: make(map[string]XpackRealm), + RoleMapping: make(map[string]XpackRoleMapping), } return r } + +// false diff --git a/typedapi/types/securityrolemapping.go b/typedapi/types/securityrolemapping.go index f8ce1602bd..b1b6146aba 100644 --- a/typedapi/types/securityrolemapping.go +++ b/typedapi/types/securityrolemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SecurityRoleMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/RoleMapping.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/RoleMapping.ts#L25-L33 type SecurityRoleMapping struct { Enabled bool `json:"enabled"` Metadata Metadata `json:"metadata"` @@ -100,3 +100,5 @@ func NewSecurityRoleMapping() *SecurityRoleMapping { return r } + +// false diff --git a/typedapi/types/securityroles.go b/typedapi/types/securityroles.go index 95390bae86..5af4a5534d 100644 --- a/typedapi/types/securityroles.go +++ b/typedapi/types/securityroles.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SecurityRoles type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L294-L298 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L306-L310 type SecurityRoles struct { Dls SecurityRolesDls `json:"dls"` File SecurityRolesFile `json:"file"` @@ -35,3 +35,5 @@ func NewSecurityRoles() *SecurityRoles { return r } + +// false diff --git a/typedapi/types/securityrolesdls.go b/typedapi/types/securityrolesdls.go index 58c3241e5d..42e9223f15 100644 --- a/typedapi/types/securityrolesdls.go +++ b/typedapi/types/securityrolesdls.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SecurityRolesDls type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L306-L308 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L318-L320 type SecurityRolesDls struct { BitSetCache SecurityRolesDlsBitSetCache `json:"bit_set_cache"` } @@ -33,3 +33,5 @@ func NewSecurityRolesDls() *SecurityRolesDls { return r } + +// false diff --git a/typedapi/types/securityrolesdlsbitsetcache.go b/typedapi/types/securityrolesdlsbitsetcache.go index f5349824b6..16b494b1fb 100644 --- a/typedapi/types/securityrolesdlsbitsetcache.go +++ b/typedapi/types/securityrolesdlsbitsetcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SecurityRolesDlsBitSetCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L310-L314 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L322-L326 type SecurityRolesDlsBitSetCache struct { Count int `json:"count"` Memory ByteSize `json:"memory,omitempty"` @@ -90,3 +90,5 @@ func NewSecurityRolesDlsBitSetCache() *SecurityRolesDlsBitSetCache { return r } + +// false diff --git a/typedapi/types/securityrolesfile.go b/typedapi/types/securityrolesfile.go index ba2750c11e..b9a0ea920b 100644 --- a/typedapi/types/securityrolesfile.go +++ b/typedapi/types/securityrolesfile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SecurityRolesFile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L316-L320 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L328-L332 type SecurityRolesFile struct { Dls bool `json:"dls"` Fls bool `json:"fls"` @@ -107,3 +107,5 @@ func NewSecurityRolesFile() *SecurityRolesFile { return r } + +// false diff --git a/typedapi/types/securityrolesnative.go b/typedapi/types/securityrolesnative.go index b37ef4d1d4..e8355116da 100644 --- a/typedapi/types/securityrolesnative.go +++ b/typedapi/types/securityrolesnative.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SecurityRolesNative type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L300-L304 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L312-L316 type SecurityRolesNative struct { Dls bool `json:"dls"` Fls bool `json:"fls"` @@ -107,3 +107,5 @@ func NewSecurityRolesNative() *SecurityRolesNative { return r } + +// false diff --git a/typedapi/types/securitysettings.go b/typedapi/types/securitysettings.go new file mode 100644 index 0000000000..1261bda420 --- /dev/null +++ b/typedapi/types/securitysettings.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// SecuritySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/SecuritySettings.ts#L22-L24 +type SecuritySettings struct { + Index *IndexSettings `json:"index,omitempty"` +} + +// NewSecuritySettings returns a SecuritySettings. +func NewSecuritySettings() *SecuritySettings { + r := &SecuritySettings{} + + return r +} + +// true + +type SecuritySettingsVariant interface { + SecuritySettingsCaster() *SecuritySettings +} + +func (s *SecuritySettings) SecuritySettingsCaster() *SecuritySettings { + return s +} diff --git a/typedapi/types/segment.go b/typedapi/types/segment.go index 511a26b9a0..931fe6cc87 100644 --- a/typedapi/types/segment.go +++ b/typedapi/types/segment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Segment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/segments/types.ts#L28-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/segments/types.ts#L28-L38 type Segment struct { Attributes map[string]string `json:"attributes"` Committed bool `json:"committed"` @@ -184,8 +184,10 @@ func (s *Segment) UnmarshalJSON(data []byte) error { // NewSegment returns a Segment. func NewSegment() *Segment { r := &Segment{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/segmentsrecord.go b/typedapi/types/segmentsrecord.go index be71d323f4..917425618b 100644 --- a/typedapi/types/segmentsrecord.go +++ b/typedapi/types/segmentsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SegmentsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/segments/types.ts#L22-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/segments/types.ts#L22-L107 type SegmentsRecord struct { // Committed If `true`, the segment is synced to disk. // Segments that are synced can survive a hard reboot. 
@@ -259,3 +259,5 @@ func NewSegmentsRecord() *SegmentsRecord { return r } + +// false diff --git a/typedapi/types/segmentsstats.go b/typedapi/types/segmentsstats.go index bd1dbbb4bd..850fc7de3a 100644 --- a/typedapi/types/segmentsstats.go +++ b/typedapi/types/segmentsstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SegmentsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L273-L366 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L276-L369 type SegmentsStats struct { // Count Total number of segments across all shards assigned to selected nodes. Count int `json:"count"` @@ -381,8 +381,10 @@ func (s *SegmentsStats) UnmarshalJSON(data []byte) error { // NewSegmentsStats returns a SegmentsStats. func NewSegmentsStats() *SegmentsStats { r := &SegmentsStats{ - FileSizes: make(map[string]ShardFileSizeInfo, 0), + FileSizes: make(map[string]ShardFileSizeInfo), } return r } + +// false diff --git a/typedapi/types/selectoption.go b/typedapi/types/selectoption.go index fda4f61ecb..7dca4cdcef 100644 --- a/typedapi/types/selectoption.go +++ b/typedapi/types/selectoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SelectOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L25-L28 type SelectOption struct { Label string `json:"label"` Value ScalarValue `json:"value"` @@ -80,3 +80,13 @@ func NewSelectOption() *SelectOption { return r } + +// true + +type SelectOptionVariant interface { + SelectOptionCaster() *SelectOption +} + +func (s *SelectOption) SelectOptionCaster() *SelectOption { + return s +} diff --git a/typedapi/types/semanticquery.go b/typedapi/types/semanticquery.go index 94f6fe7641..fd9ee4a7a3 100644 --- a/typedapi/types/semanticquery.go +++ b/typedapi/types/semanticquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SemanticQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/SemanticQuery.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/SemanticQuery.ts#L22-L30 type SemanticQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -124,3 +124,13 @@ func NewSemanticQuery() *SemanticQuery { return r } + +// true + +type SemanticQueryVariant interface { + SemanticQueryCaster() *SemanticQuery +} + +func (s *SemanticQuery) SemanticQueryCaster() *SemanticQuery { + return s +} diff --git a/typedapi/types/semantictextproperty.go b/typedapi/types/semantictextproperty.go index 0f43c98f94..4fb648c34c 100644 --- a/typedapi/types/semantictextproperty.go +++ b/typedapi/types/semantictextproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,22 @@ import ( // SemanticTextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L206-L210 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L210-L226 type SemanticTextProperty struct { - InferenceId string `json:"inference_id"` + // InferenceId Inference endpoint that will be used to generate embeddings for the field. + // This parameter cannot be updated. Use the Create inference API to create the + // endpoint. + // If `search_inference_id` is specified, the inference endpoint will only be + // used at index time. + InferenceId *string `json:"inference_id,omitempty"` Meta map[string]string `json:"meta,omitempty"` - Type string `json:"type,omitempty"` + // SearchInferenceId Inference endpoint that will be used to generate embeddings at query time. + // You can update this parameter by using the Update mapping API. Use the Create + // inference API to create the endpoint. 
+ // If not specified, the inference endpoint defined by inference_id will be used + // at both index and query time. + SearchInferenceId *string `json:"search_inference_id,omitempty"` + Type string `json:"type,omitempty"` } func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { @@ -65,6 +76,11 @@ func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Meta", err) } + case "search_inference_id": + if err := dec.Decode(&s.SearchInferenceId); err != nil { + return fmt.Errorf("%s | %w", "SearchInferenceId", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -79,9 +95,10 @@ func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { func (s SemanticTextProperty) MarshalJSON() ([]byte, error) { type innerSemanticTextProperty SemanticTextProperty tmp := innerSemanticTextProperty{ - InferenceId: s.InferenceId, - Meta: s.Meta, - Type: s.Type, + InferenceId: s.InferenceId, + Meta: s.Meta, + SearchInferenceId: s.SearchInferenceId, + Type: s.Type, } tmp.Type = "semantic_text" @@ -92,8 +109,18 @@ func (s SemanticTextProperty) MarshalJSON() ([]byte, error) { // NewSemanticTextProperty returns a SemanticTextProperty. func NewSemanticTextProperty() *SemanticTextProperty { r := &SemanticTextProperty{ - Meta: make(map[string]string, 0), + Meta: make(map[string]string), } return r } + +// true + +type SemanticTextPropertyVariant interface { + SemanticTextPropertyCaster() *SemanticTextProperty +} + +func (s *SemanticTextProperty) SemanticTextPropertyCaster() *SemanticTextProperty { + return s +} diff --git a/typedapi/types/serbiananalyzer.go b/typedapi/types/serbiananalyzer.go index b140a0d4eb..b3f0010864 100644 --- a/typedapi/types/serbiananalyzer.go +++ b/typedapi/types/serbiananalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SerbianAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L271-L276 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L282-L287 type SerbianAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewSerbianAnalyzer() *SerbianAnalyzer { return r } + +// true + +type SerbianAnalyzerVariant interface { + SerbianAnalyzerCaster() *SerbianAnalyzer +} + +func (s *SerbianAnalyzer) SerbianAnalyzerCaster() *SerbianAnalyzer { + return s +} diff --git a/typedapi/types/serialdifferencingaggregation.go b/typedapi/types/serialdifferencingaggregation.go index bd1dc39049..f994f87c73 100644 --- a/typedapi/types/serialdifferencingaggregation.go +++ b/typedapi/types/serialdifferencingaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SerialDifferencingAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L399-L408 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L399-L408 type SerialDifferencingAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -112,3 +112,13 @@ func NewSerialDifferencingAggregation() *SerialDifferencingAggregation { return r } + +// true + +type SerialDifferencingAggregationVariant interface { + SerialDifferencingAggregationCaster() *SerialDifferencingAggregation +} + +func (s *SerialDifferencingAggregation) SerialDifferencingAggregationCaster() *SerialDifferencingAggregation { + return s +} diff --git a/typedapi/types/serializedclusterstate.go b/typedapi/types/serializedclusterstate.go index 2719aa79d3..7ed09d3fc1 100644 --- a/typedapi/types/serializedclusterstate.go +++ b/typedapi/types/serializedclusterstate.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SerializedClusterState type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L232-L238 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L232-L238 type SerializedClusterState struct { Diffs *SerializedClusterStateDetail `json:"diffs,omitempty"` // FullStates Number of published cluster states. 
@@ -35,3 +35,5 @@ func NewSerializedClusterState() *SerializedClusterState { return r } + +// false diff --git a/typedapi/types/serializedclusterstatedetail.go b/typedapi/types/serializedclusterstatedetail.go index 1849787a9f..3db837e57c 100644 --- a/typedapi/types/serializedclusterstatedetail.go +++ b/typedapi/types/serializedclusterstatedetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SerializedClusterStateDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L240-L246 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L240-L246 type SerializedClusterStateDetail struct { CompressedSize *string `json:"compressed_size,omitempty"` CompressedSizeInBytes *int64 `json:"compressed_size_in_bytes,omitempty"` @@ -135,3 +135,5 @@ func NewSerializedClusterStateDetail() *SerializedClusterStateDetail { return r } + +// false diff --git a/typedapi/types/servicetoken.go b/typedapi/types/servicetoken.go index 54b6b81ee8..2b9d5dd52a 100644 --- a/typedapi/types/servicetoken.go +++ b/typedapi/types/servicetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ServiceToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/create_service_token/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/create_service_token/types.ts#L22-L25 type ServiceToken struct { Name string `json:"name"` Value string `json:"value"` @@ -80,3 +80,5 @@ func NewServiceToken() *ServiceToken { return r } + +// false diff --git a/typedapi/types/setpriorityaction.go b/typedapi/types/setpriorityaction.go index dbecee280a..0cd867f9bb 100644 --- a/typedapi/types/setpriorityaction.go +++ b/typedapi/types/setpriorityaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SetPriorityAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L98-L100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L95-L97 type SetPriorityAction struct { Priority *int `json:"priority,omitempty"` } @@ -78,3 +78,13 @@ func NewSetPriorityAction() *SetPriorityAction { return r } + +// true + +type SetPriorityActionVariant interface { + SetPriorityActionCaster() *SetPriorityAction +} + +func (s *SetPriorityAction) SetPriorityActionCaster() *SetPriorityAction { + return s +} diff --git a/typedapi/types/setprocessor.go b/typedapi/types/setprocessor.go index 87d72ec014..8241f7243f 100644 --- a/typedapi/types/setprocessor.go +++ b/typedapi/types/setprocessor.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SetProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1386-L1420 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1427-L1461 type SetProcessor struct { // CopyFrom The origin field which will be copied to `field`, cannot set `value` // simultaneously. @@ -45,7 +45,7 @@ type SetProcessor struct { // Supports template snippets. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreEmptyValue If `true` and `value` is a template snippet that evaluates to `null` or the // empty string, the processor quietly exits without modifying the document. 
IgnoreEmptyValue *bool `json:"ignore_empty_value,omitempty"` @@ -109,16 +109,9 @@ func (s *SetProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_empty_value": var tmp any @@ -207,3 +200,13 @@ func NewSetProcessor() *SetProcessor { return r } + +// true + +type SetProcessorVariant interface { + SetProcessorCaster() *SetProcessor +} + +func (s *SetProcessor) SetProcessorCaster() *SetProcessor { + return s +} diff --git a/typedapi/types/setsecurityuserprocessor.go b/typedapi/types/setsecurityuserprocessor.go index db94c07fce..e7621c8e1d 100644 --- a/typedapi/types/setsecurityuserprocessor.go +++ b/typedapi/types/setsecurityuserprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SetSecurityUserProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1422-L1431 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1463-L1472 type SetSecurityUserProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type SetSecurityUserProcessor struct { // Field The field to store the user information into. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -84,16 +84,9 @@ func (s *SetSecurityUserProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -142,3 +135,13 @@ func NewSetSecurityUserProcessor() *SetSecurityUserProcessor { return r } + +// true + +type SetSecurityUserProcessorVariant interface { + SetSecurityUserProcessorCaster() *SetSecurityUserProcessor +} + +func (s *SetSecurityUserProcessor) SetSecurityUserProcessorCaster() *SetSecurityUserProcessor { + return s +} diff --git a/typedapi/types/settings.go b/typedapi/types/settings.go index d4bbb17af4..b364fc0286 100644 --- a/typedapi/types/settings.go +++ b/typedapi/types/settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Settings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L98-L144 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L98-L144 type Settings struct { // AlignCheckpoints Specifies whether the transform checkpoint ranges should be optimized for // performance. 
Such optimization can align @@ -182,3 +182,13 @@ func NewSettings() *Settings { return r } + +// true + +type SettingsVariant interface { + SettingsCaster() *Settings +} + +func (s *Settings) SettingsCaster() *Settings { + return s +} diff --git a/typedapi/types/settingsanalyze.go b/typedapi/types/settingsanalyze.go index 0e9a162bbd..4e1616831b 100644 --- a/typedapi/types/settingsanalyze.go +++ b/typedapi/types/settingsanalyze.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SettingsAnalyze type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L235-L238 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L243-L246 type SettingsAnalyze struct { MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` } @@ -66,3 +66,13 @@ func NewSettingsAnalyze() *SettingsAnalyze { return r } + +// true + +type SettingsAnalyzeVariant interface { + SettingsAnalyzeCaster() *SettingsAnalyze +} + +func (s *SettingsAnalyze) SettingsAnalyzeCaster() *SettingsAnalyze { + return s +} diff --git a/typedapi/types/settingshighlight.go b/typedapi/types/settingshighlight.go index 8f340a253e..2819ea30f9 100644 --- a/typedapi/types/settingshighlight.go +++ b/typedapi/types/settingshighlight.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SettingsHighlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L230-L233 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L238-L241 type SettingsHighlight struct { MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` } @@ -78,3 +78,13 @@ func NewSettingsHighlight() *SettingsHighlight { return r } + +// true + +type SettingsHighlightVariant interface { + SettingsHighlightCaster() *SettingsHighlight +} + +func (s *SettingsHighlight) SettingsHighlightCaster() *SettingsHighlight { + return s +} diff --git a/typedapi/types/settingsquerystring.go b/typedapi/types/settingsquerystring.go index 31a87bc7d2..5d0d4a8127 100644 --- a/typedapi/types/settingsquerystring.go +++ b/typedapi/types/settingsquerystring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SettingsQueryString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L250-L252 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L258-L260 type SettingsQueryString struct { Lenient Stringifiedboolean `json:"lenient"` } @@ -66,3 +66,13 @@ func NewSettingsQueryString() *SettingsQueryString { return r } + +// true + +type SettingsQueryStringVariant interface { + SettingsQueryStringCaster() *SettingsQueryString +} + +func (s *SettingsQueryString) SettingsQueryStringCaster() *SettingsQueryString { + return s +} diff --git a/typedapi/types/settingssearch.go b/typedapi/types/settingssearch.go index 2f0f7a5296..01968f451c 100644 --- a/typedapi/types/settingssearch.go +++ b/typedapi/types/settingssearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SettingsSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L240-L243 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L248-L251 type SettingsSearch struct { Idle *SearchIdle `json:"idle,omitempty"` Slowlog *SlowlogSettings `json:"slowlog,omitempty"` @@ -34,3 +34,13 @@ func NewSettingsSearch() *SettingsSearch { return r } + +// true + +type SettingsSearchVariant interface { + SettingsSearchCaster() *SettingsSearch +} + +func (s *SettingsSearch) SettingsSearchCaster() *SettingsSearch { + return s +} diff --git a/typedapi/types/settingssimilarity.go b/typedapi/types/settingssimilarity.go index 8d6c17a8f7..1ee84645ea 100644 --- a/typedapi/types/settingssimilarity.go +++ b/typedapi/types/settingssimilarity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,5 +31,9 @@ package types // SettingsSimilarityLmj // SettingsSimilarityScripted // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L171-L182 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L178-L190 type SettingsSimilarity any + +type SettingsSimilarityVariant interface { + SettingsSimilarityCaster() *SettingsSimilarity +} diff --git a/typedapi/types/settingssimilaritybm25.go b/typedapi/types/settingssimilaritybm25.go index 4a7a1247b8..9fde26d504 100644 --- a/typedapi/types/settingssimilaritybm25.go +++ b/typedapi/types/settingssimilaritybm25.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityBm25 type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L188-L193 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L196-L201 type SettingsSimilarityBm25 struct { B *Float64 `json:"b,omitempty"` DiscountOverlaps *bool `json:"discount_overlaps,omitempty"` @@ -131,3 +131,13 @@ func NewSettingsSimilarityBm25() *SettingsSimilarityBm25 { return r } + +// true + +type SettingsSimilarityBm25Variant interface { + SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 +} + +func (s *SettingsSimilarityBm25) SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 { + return s +} diff --git a/typedapi/types/settingssimilarityboolean.go b/typedapi/types/settingssimilarityboolean.go index e6ce5e5433..2b88fb95e1 100644 --- a/typedapi/types/settingssimilarityboolean.go +++ b/typedapi/types/settingssimilarityboolean.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // SettingsSimilarityBoolean type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L184-L186 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L192-L194 type SettingsSimilarityBoolean struct { Type string `json:"type,omitempty"` } @@ -49,3 +49,13 @@ func NewSettingsSimilarityBoolean() *SettingsSimilarityBoolean { return r } + +// true + +type SettingsSimilarityBooleanVariant interface { + SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean +} + +func (s *SettingsSimilarityBoolean) SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean { + return s +} diff --git a/typedapi/types/settingssimilaritydfi.go b/typedapi/types/settingssimilaritydfi.go index 85f679dda8..da420b92b6 100644 --- a/typedapi/types/settingssimilaritydfi.go +++ b/typedapi/types/settingssimilaritydfi.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,7 +28,7 @@ import ( // SettingsSimilarityDfi type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L195-L198 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L203-L206 type SettingsSimilarityDfi struct { IndependenceMeasure dfiindependencemeasure.DFIIndependenceMeasure `json:"independence_measure"` Type string `json:"type,omitempty"` @@ -53,3 +53,13 @@ func NewSettingsSimilarityDfi() *SettingsSimilarityDfi { return r } + +// true + +type SettingsSimilarityDfiVariant interface { + SettingsSimilarityDfiCaster() *SettingsSimilarityDfi +} + +func (s *SettingsSimilarityDfi) SettingsSimilarityDfiCaster() *SettingsSimilarityDfi { + return s +} diff --git a/typedapi/types/settingssimilaritydfr.go b/typedapi/types/settingssimilaritydfr.go index bf4deda19e..f8687aa332 100644 --- a/typedapi/types/settingssimilaritydfr.go +++ b/typedapi/types/settingssimilaritydfr.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SettingsSimilarityDfr type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L200-L205 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L208-L213 type SettingsSimilarityDfr struct { AfterEffect dfraftereffect.DFRAfterEffect `json:"after_effect"` BasicModel dfrbasicmodel.DFRBasicModel `json:"basic_model"` @@ -59,3 +59,13 @@ func NewSettingsSimilarityDfr() *SettingsSimilarityDfr { return r } + +// true + +type SettingsSimilarityDfrVariant interface { + SettingsSimilarityDfrCaster() *SettingsSimilarityDfr +} + +func (s *SettingsSimilarityDfr) SettingsSimilarityDfrCaster() *SettingsSimilarityDfr { + return s +} diff --git a/typedapi/types/settingssimilarityib.go b/typedapi/types/settingssimilarityib.go index 13f47d5a03..cf090c5959 100644 --- a/typedapi/types/settingssimilarityib.go +++ b/typedapi/types/settingssimilarityib.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SettingsSimilarityIb type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L207-L212 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L215-L220 type SettingsSimilarityIb struct { Distribution ibdistribution.IBDistribution `json:"distribution"` Lambda iblambda.IBLambda `json:"lambda"` @@ -59,3 +59,13 @@ func NewSettingsSimilarityIb() *SettingsSimilarityIb { return r } + +// true + +type SettingsSimilarityIbVariant interface { + SettingsSimilarityIbCaster() *SettingsSimilarityIb +} + +func (s *SettingsSimilarityIb) SettingsSimilarityIbCaster() *SettingsSimilarityIb { + return s +} diff --git a/typedapi/types/settingssimilaritylmd.go b/typedapi/types/settingssimilaritylmd.go index 5ae9e06afb..59614606fe 100644 --- a/typedapi/types/settingssimilaritylmd.go +++ b/typedapi/types/settingssimilaritylmd.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityLmd type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L214-L217 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L222-L225 type SettingsSimilarityLmd struct { Mu *Float64 `json:"mu,omitempty"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewSettingsSimilarityLmd() *SettingsSimilarityLmd { return r } + +// true + +type SettingsSimilarityLmdVariant interface { + SettingsSimilarityLmdCaster() *SettingsSimilarityLmd +} + +func (s *SettingsSimilarityLmd) SettingsSimilarityLmdCaster() *SettingsSimilarityLmd { + return s +} diff --git a/typedapi/types/settingssimilaritylmj.go b/typedapi/types/settingssimilaritylmj.go index b5b932af0e..039ea1a6b8 100644 --- a/typedapi/types/settingssimilaritylmj.go +++ b/typedapi/types/settingssimilaritylmj.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityLmj type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L219-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L227-L230 type SettingsSimilarityLmj struct { Lambda *Float64 `json:"lambda,omitempty"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewSettingsSimilarityLmj() *SettingsSimilarityLmj { return r } + +// true + +type SettingsSimilarityLmjVariant interface { + SettingsSimilarityLmjCaster() *SettingsSimilarityLmj +} + +func (s *SettingsSimilarityLmj) SettingsSimilarityLmjCaster() *SettingsSimilarityLmj { + return s +} diff --git a/typedapi/types/settingssimilarityscripted.go b/typedapi/types/settingssimilarityscripted.go index 16821870ff..78685c7718 100644 --- a/typedapi/types/settingssimilarityscripted.go +++ b/typedapi/types/settingssimilarityscripted.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // SettingsSimilarityScripted type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L224-L228 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L232-L236 type SettingsSimilarityScripted struct { Script Script `json:"script"` Type string `json:"type,omitempty"` @@ -53,3 +53,13 @@ func NewSettingsSimilarityScripted() *SettingsSimilarityScripted { return r } + +// true + +type SettingsSimilarityScriptedVariant interface { + SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted +} + +func (s *SettingsSimilarityScripted) SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted { + return s +} diff --git a/typedapi/types/shapefieldquery.go b/typedapi/types/shapefieldquery.go index 159da19588..d29d198c77 100644 --- a/typedapi/types/shapefieldquery.go +++ b/typedapi/types/shapefieldquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ShapeFieldQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L383-L396 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L383-L396 type ShapeFieldQuery struct { // IndexedShape Queries using a pre-indexed shape. 
IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` @@ -84,3 +84,13 @@ func NewShapeFieldQuery() *ShapeFieldQuery { return r } + +// true + +type ShapeFieldQueryVariant interface { + ShapeFieldQueryCaster() *ShapeFieldQuery +} + +func (s *ShapeFieldQuery) ShapeFieldQueryCaster() *ShapeFieldQuery { + return s +} diff --git a/typedapi/types/shapeproperty.go b/typedapi/types/shapeproperty.go index cc192c6155..cb36f00ca4 100644 --- a/typedapi/types/shapeproperty.go +++ b/typedapi/types/shapeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ShapeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/geo.ts#L73-L85 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/geo.ts#L73-L85 type ShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,11 +46,12 @@ type ShapeProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *ShapeProperty) UnmarshalJSON(data []byte) error { @@ -135,301 +137,313 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := 
NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": 
oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -511,301 +525,313 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -825,6 +851,11 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -839,19 +870,20 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { func (s ShapeProperty) MarshalJSON() ([]byte, error) { type innerShapeProperty ShapeProperty tmp := innerShapeProperty{ - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Meta: s.Meta, - Orientation: s.Orientation, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "shape" @@ -862,10 
+894,20 @@ func (s ShapeProperty) MarshalJSON() ([]byte, error) { // NewShapeProperty returns a ShapeProperty. func NewShapeProperty() *ShapeProperty { r := &ShapeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ShapePropertyVariant interface { + ShapePropertyCaster() *ShapeProperty +} + +func (s *ShapeProperty) ShapePropertyCaster() *ShapeProperty { + return s +} diff --git a/typedapi/types/shapequery.go b/typedapi/types/shapequery.go index 124444287a..d6b0716e8e 100644 --- a/typedapi/types/shapequery.go +++ b/typedapi/types/shapequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShapeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L367-L381 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L367-L381 type ShapeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -153,8 +153,18 @@ func (s ShapeQuery) MarshalJSON() ([]byte, error) { // NewShapeQuery returns a ShapeQuery. 
func NewShapeQuery() *ShapeQuery { r := &ShapeQuery{ - ShapeQuery: make(map[string]ShapeFieldQuery, 0), + ShapeQuery: make(map[string]ShapeFieldQuery), } return r } + +// true + +type ShapeQueryVariant interface { + ShapeQueryCaster() *ShapeQuery +} + +func (s *ShapeQuery) ShapeQueryCaster() *ShapeQuery { + return s +} diff --git a/typedapi/types/shardcommit.go b/typedapi/types/shardcommit.go index 0e959e87ca..ee014af0f5 100644 --- a/typedapi/types/shardcommit.go +++ b/typedapi/types/shardcommit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardCommit type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L112-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L112-L117 type ShardCommit struct { Generation int `json:"generation"` Id string `json:"id"` @@ -106,8 +106,10 @@ func (s *ShardCommit) UnmarshalJSON(data []byte) error { // NewShardCommit returns a ShardCommit. func NewShardCommit() *ShardCommit { r := &ShardCommit{ - UserData: make(map[string]string, 0), + UserData: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/shardfailure.go b/typedapi/types/shardfailure.go index f9b386db4c..7e8a398fc6 100644 --- a/typedapi/types/shardfailure.go +++ b/typedapi/types/shardfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Errors.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Errors.ts#L52-L58 type ShardFailure struct { Index *string `json:"index,omitempty"` Node *string `json:"node,omitempty"` @@ -116,3 +116,5 @@ func NewShardFailure() *ShardFailure { return r } + +// false diff --git a/typedapi/types/shardfilesizeinfo.go b/typedapi/types/shardfilesizeinfo.go index a20276e9db..4236fcfd8e 100644 --- a/typedapi/types/shardfilesizeinfo.go +++ b/typedapi/types/shardfilesizeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardFileSizeInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L124-L131 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L124-L131 type ShardFileSizeInfo struct { AverageSizeInBytes *int64 `json:"average_size_in_bytes,omitempty"` Count *int64 `json:"count,omitempty"` @@ -154,3 +154,5 @@ func NewShardFileSizeInfo() *ShardFileSizeInfo { return r } + +// false diff --git a/typedapi/types/shardhealthstats.go b/typedapi/types/shardhealthstats.go index 433dffdfc9..863f33cd84 100644 --- a/typedapi/types/shardhealthstats.go +++ b/typedapi/types/shardhealthstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ShardHealthStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/health/types.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/health/types.ts#L37-L45 type ShardHealthStats struct { ActiveShards int `json:"active_shards"` InitializingShards int `json:"initializing_shards"` @@ -169,3 +169,5 @@ func NewShardHealthStats() *ShardHealthStats { return r } + +// false diff --git a/typedapi/types/shardlease.go b/typedapi/types/shardlease.go index 587374e1ec..faf3850478 100644 --- a/typedapi/types/shardlease.go +++ b/typedapi/types/shardlease.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardLease type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L133-L138 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L133-L138 type ShardLease struct { Id string `json:"id"` RetainingSeqNo int64 `json:"retaining_seq_no"` @@ -102,3 +102,5 @@ func NewShardLease() *ShardLease { return r } + +// false diff --git a/typedapi/types/shardmigrationstatus.go b/typedapi/types/shardmigrationstatus.go index ef394a71ed..4d1f1184d8 100644 --- a/typedapi/types/shardmigrationstatus.go +++ b/typedapi/types/shardmigrationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // ShardMigrationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 type ShardMigrationStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewShardMigrationStatus() *ShardMigrationStatus { return r } + +// false diff --git a/typedapi/types/shardpath.go b/typedapi/types/shardpath.go index 092082b895..8527b209c2 100644 --- a/typedapi/types/shardpath.go +++ b/typedapi/types/shardpath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardPath type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L140-L144 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L140-L144 type ShardPath struct { DataPath string `json:"data_path"` IsCustomDataPath bool `json:"is_custom_data_path"` @@ -102,3 +102,5 @@ func NewShardPath() *ShardPath { return r } + +// false diff --git a/typedapi/types/shardprofile.go b/typedapi/types/shardprofile.go index ccc213cd0c..335695836e 100644 --- a/typedapi/types/shardprofile.go +++ b/typedapi/types/shardprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/profile.ts#L142-L152 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/profile.ts#L142-L152 type ShardProfile struct { Aggregations []AggregationProfile `json:"aggregations"` Cluster string `json:"cluster"` @@ -139,3 +139,5 @@ func NewShardProfile() *ShardProfile { return r } + +// false diff --git a/typedapi/types/shardquerycache.go b/typedapi/types/shardquerycache.go index 2f19ecb4ab..3b90ae4fd8 100644 --- a/typedapi/types/shardquerycache.go +++ b/typedapi/types/shardquerycache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardQueryCache type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L146-L154 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L146-L154 type ShardQueryCache struct { CacheCount int64 `json:"cache_count"` CacheSize int64 `json:"cache_size"` @@ -173,3 +173,5 @@ func NewShardQueryCache() *ShardQueryCache { return r } + +// false diff --git a/typedapi/types/shardrecovery.go b/typedapi/types/shardrecovery.go index 2269a47f34..1ce5ff3849 100644 --- a/typedapi/types/shardrecovery.go +++ b/typedapi/types/shardrecovery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardRecovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L118-L135 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L118-L135 type ShardRecovery struct { Id int64 `json:"id"` Index RecoveryIndexStatus `json:"index"` @@ -190,3 +190,5 @@ func NewShardRecovery() *ShardRecovery { return r } + +// false diff --git a/typedapi/types/shardretentionleases.go b/typedapi/types/shardretentionleases.go index fc1225068c..c6ae83fb24 100644 --- a/typedapi/types/shardretentionleases.go +++ b/typedapi/types/shardretentionleases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardRetentionLeases type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L156-L160 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L156-L160 type ShardRetentionLeases struct { Leases []ShardLease `json:"leases"` PrimaryTerm int64 `json:"primary_term"` @@ -89,3 +89,5 @@ func NewShardRetentionLeases() *ShardRetentionLeases { return r } + +// false diff --git a/typedapi/types/shardrouting.go b/typedapi/types/shardrouting.go index d717b229f7..17158f78e1 100644 --- a/typedapi/types/shardrouting.go +++ b/typedapi/types/shardrouting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ShardRouting type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L162-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L162-L167 type ShardRouting struct { Node string `json:"node"` Primary bool `json:"primary"` @@ -110,3 +110,5 @@ func NewShardRouting() *ShardRouting { return r } + +// false diff --git a/typedapi/types/shardsavailabilityindicator.go b/typedapi/types/shardsavailabilityindicator.go index 4c41ebefd3..a0c7c49029 100644 --- a/typedapi/types/shardsavailabilityindicator.go +++ b/typedapi/types/shardsavailabilityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ShardsAvailabilityIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L105-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L106-L110 type ShardsAvailabilityIndicator struct { Details *ShardsAvailabilityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewShardsAvailabilityIndicator() *ShardsAvailabilityIndicator { return r } + +// false diff --git a/typedapi/types/shardsavailabilityindicatordetails.go b/typedapi/types/shardsavailabilityindicatordetails.go index 4be7b6b5e4..d92cbf43aa 100644 --- a/typedapi/types/shardsavailabilityindicatordetails.go +++ b/typedapi/types/shardsavailabilityindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsAvailabilityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L110-L121 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L111-L122 type ShardsAvailabilityIndicatorDetails struct { CreatingPrimaries int64 `json:"creating_primaries"` CreatingReplicas int64 `json:"creating_replicas"` @@ -221,3 +221,5 @@ func NewShardsAvailabilityIndicatorDetails() *ShardsAvailabilityIndicatorDetails return r } + +// false diff --git a/typedapi/types/shardscapacityindicator.go b/typedapi/types/shardscapacityindicator.go index 5dc8af49ac..0664d20c29 100644 --- a/typedapi/types/shardscapacityindicator.go +++ b/typedapi/types/shardscapacityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // ShardsCapacityIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L190-L194 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L191-L195 type ShardsCapacityIndicator struct { Details *ShardsCapacityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewShardsCapacityIndicator() *ShardsCapacityIndicator { return r } + +// false diff --git a/typedapi/types/shardscapacityindicatordetails.go b/typedapi/types/shardscapacityindicatordetails.go index 319728c4cf..36c0c1fc04 100644 --- a/typedapi/types/shardscapacityindicatordetails.go +++ b/typedapi/types/shardscapacityindicatordetails.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ShardsCapacityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L196-L199 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L197-L200 type ShardsCapacityIndicatorDetails struct { Data ShardsCapacityIndicatorTierDetail `json:"data"` Frozen ShardsCapacityIndicatorTierDetail `json:"frozen"` @@ -34,3 +34,5 @@ func NewShardsCapacityIndicatorDetails() *ShardsCapacityIndicatorDetails { return r } + +// false diff --git a/typedapi/types/shardscapacityindicatortierdetail.go b/typedapi/types/shardscapacityindicatortierdetail.go index fb574fb9ae..982da83c4d 100644 --- a/typedapi/types/shardscapacityindicatortierdetail.go +++ b/typedapi/types/shardscapacityindicatortierdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsCapacityIndicatorTierDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L201-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L202-L205 type ShardsCapacityIndicatorTierDetail struct { CurrentUsedShards *int `json:"current_used_shards,omitempty"` MaxShardsInCluster int `json:"max_shards_in_cluster"` @@ -95,3 +95,5 @@ func NewShardsCapacityIndicatorTierDetail() *ShardsCapacityIndicatorTierDetail { return r } + +// false diff --git a/typedapi/types/shardsegmentrouting.go b/typedapi/types/shardsegmentrouting.go index a5ef848508..045750957b 100644 --- a/typedapi/types/shardsegmentrouting.go +++ b/typedapi/types/shardsegmentrouting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardSegmentRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/segments/types.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/segments/types.ts#L40-L44 type ShardSegmentRouting struct { Node string `json:"node"` Primary bool `json:"primary"` @@ -102,3 +102,5 @@ func NewShardSegmentRouting() *ShardSegmentRouting { return r } + +// false diff --git a/typedapi/types/shardsequencenumber.go b/typedapi/types/shardsequencenumber.go index 59c7ab71b9..c087090942 100644 --- a/typedapi/types/shardsequencenumber.go +++ b/typedapi/types/shardsequencenumber.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardSequenceNumber type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L176-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L176-L180 type ShardSequenceNumber struct { GlobalCheckpoint int64 `json:"global_checkpoint"` LocalCheckpoint int64 `json:"local_checkpoint"` @@ -99,3 +99,5 @@ func NewShardSequenceNumber() *ShardSequenceNumber { return r } + +// false diff --git a/typedapi/types/shardsrecord.go b/typedapi/types/shardsrecord.go index f1fe4d5b92..1aae52c212 100644 --- a/typedapi/types/shardsrecord.go +++ b/typedapi/types/shardsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/shards/types.ts#L20-L427 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/shards/types.ts#L20-L427 type ShardsRecord struct { // BulkAvgSizeInBytes The average size in bytes of shard bulk operations. 
BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` @@ -1179,3 +1179,5 @@ func NewShardsRecord() *ShardsRecord { return r } + +// false diff --git a/typedapi/types/shardssegment.go b/typedapi/types/shardssegment.go index 88d3bba926..4cf13739fe 100644 --- a/typedapi/types/shardssegment.go +++ b/typedapi/types/shardssegment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsSegment type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/segments/types.ts#L46-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/segments/types.ts#L46-L51 type ShardsSegment struct { NumCommittedSegments int `json:"num_committed_segments"` NumSearchSegments int `json:"num_search_segments"` @@ -107,8 +107,10 @@ func (s *ShardsSegment) UnmarshalJSON(data []byte) error { // NewShardsSegment returns a ShardsSegment. func NewShardsSegment() *ShardsSegment { r := &ShardsSegment{ - Segments: make(map[string]Segment, 0), + Segments: make(map[string]Segment), } return r } + +// false diff --git a/typedapi/types/shardsstatssummary.go b/typedapi/types/shardsstatssummary.go index f0031826a4..251fe970c5 100644 --- a/typedapi/types/shardsstatssummary.go +++ b/typedapi/types/shardsstatssummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ShardsStatsSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 type ShardsStatsSummary struct { Incremental ShardsStatsSummaryItem `json:"incremental"` StartTimeInMillis int64 `json:"start_time_in_millis"` @@ -90,3 +90,5 @@ func NewShardsStatsSummary() *ShardsStatsSummary { return r } + +// false diff --git a/typedapi/types/shardsstatssummaryitem.go b/typedapi/types/shardsstatssummaryitem.go index f070fbe797..295a667d3a 100644 --- a/typedapi/types/shardsstatssummaryitem.go +++ b/typedapi/types/shardsstatssummaryitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsStatsSummaryItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 type ShardsStatsSummaryItem struct { FileCount int64 `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` @@ -93,3 +93,5 @@ func NewShardsStatsSummaryItem() *ShardsStatsSummaryItem { return r } + +// false diff --git a/typedapi/types/shardstatistics.go b/typedapi/types/shardstatistics.go index 92fccb6a48..7acc4a1aa3 100644 --- a/typedapi/types/shardstatistics.go +++ b/typedapi/types/shardstatistics.go @@ -16,20 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ShardStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L54-L66 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L54-L69 type ShardStatistics struct { + // Failed The number of shards the operation or search attempted to run on but failed. Failed uint `json:"failed"` Failures []ShardFailure `json:"failures,omitempty"` Skipped *uint `json:"skipped,omitempty"` - // Successful Indicates how many shards have successfully run the search. + // Successful The number of shards the operation or search succeeded on. Successful uint `json:"successful"` - // Total Indicates how many shards the search will run on overall. + // Total The number of shards the operation or search will run on overall. 
Total uint `json:"total"` } @@ -39,3 +40,5 @@ func NewShardStatistics() *ShardStatistics { return r } + +// false diff --git a/typedapi/types/shardstore.go b/typedapi/types/shardstore.go index 330acba410..5e1ef9e00b 100644 --- a/typedapi/types/shardstore.go +++ b/typedapi/types/shardstore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // ShardStore type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L29-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L29-L36 type ShardStore struct { Allocation shardstoreallocation.ShardStoreAllocation `json:"allocation"` AllocationId *string `json:"allocation_id,omitempty"` @@ -120,8 +120,10 @@ func (s ShardStore) MarshalJSON() ([]byte, error) { // NewShardStore returns a ShardStore. func NewShardStore() *ShardStore { r := &ShardStore{ - ShardStore: make(map[string]ShardStoreNode, 0), + ShardStore: make(map[string]ShardStoreNode), } return r } + +// false diff --git a/typedapi/types/shardstoreexception.go b/typedapi/types/shardstoreexception.go index c5655f93e8..0b8eeef2cc 100644 --- a/typedapi/types/shardstoreexception.go +++ b/typedapi/types/shardstoreexception.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardStoreException type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L53-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L53-L56 type ShardStoreException struct { Reason string `json:"reason"` Type string `json:"type"` @@ -87,3 +87,5 @@ func NewShardStoreException() *ShardStoreException { return r } + +// false diff --git a/typedapi/types/shardstoreindex.go b/typedapi/types/shardstoreindex.go index 944b17df8f..88e16ae5ea 100644 --- a/typedapi/types/shardstoreindex.go +++ b/typedapi/types/shardstoreindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ShardStoreIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search_shards/SearchShardsResponse.ts#L62-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search_shards/SearchShardsResponse.ts#L62-L65 type ShardStoreIndex struct { Aliases []string `json:"aliases,omitempty"` Filter *Query `json:"filter,omitempty"` @@ -34,3 +34,5 @@ func NewShardStoreIndex() *ShardStoreIndex { return r } + +// false diff --git a/typedapi/types/shardstorenode.go b/typedapi/types/shardstorenode.go index b66a5724a9..faadeeec7d 100644 --- a/typedapi/types/shardstorenode.go +++ b/typedapi/types/shardstorenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardStoreNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L38-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L38-L45 type ShardStoreNode struct { Attributes map[string]string `json:"attributes"` EphemeralId *string `json:"ephemeral_id,omitempty"` @@ -111,8 +111,10 @@ func (s *ShardStoreNode) UnmarshalJSON(data []byte) error { // NewShardStoreNode returns a ShardStoreNode. 
func NewShardStoreNode() *ShardStoreNode { r := &ShardStoreNode{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/shardstorewrapper.go b/typedapi/types/shardstorewrapper.go index bc09c9f703..a9409adee7 100644 --- a/typedapi/types/shardstorewrapper.go +++ b/typedapi/types/shardstorewrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // ShardStoreWrapper type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/shard_stores/types.ts#L58-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/shard_stores/types.ts#L58-L60 type ShardStoreWrapper struct { Stores []ShardStore `json:"stores"` } @@ -33,3 +33,5 @@ func NewShardStoreWrapper() *ShardStoreWrapper { return r } + +// false diff --git a/typedapi/types/shardstotalstats.go b/typedapi/types/shardstotalstats.go index e5857ee34e..5af65e0f2d 100644 --- a/typedapi/types/shardstotalstats.go +++ b/typedapi/types/shardstotalstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShardsTotalStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/stats/types.ts#L182-L184 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/stats/types.ts#L182-L184 type ShardsTotalStats struct { TotalCount int64 `json:"total_count"` } @@ -77,3 +77,5 @@ func NewShardsTotalStats() *ShardsTotalStats { return r } + +// false diff --git a/typedapi/types/shared.go b/typedapi/types/shared.go index 232137d49d..a532b30da3 100644 --- a/typedapi/types/shared.go +++ b/typedapi/types/shared.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Shared type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 type Shared struct { BytesReadInBytes ByteSize `json:"bytes_read_in_bytes"` BytesWrittenInBytes ByteSize `json:"bytes_written_in_bytes"` @@ -150,3 +150,5 @@ func NewShared() *Shared { return r } + +// false diff --git a/typedapi/types/sharedfilesystemrepository.go b/typedapi/types/sharedfilesystemrepository.go index 3064de0051..6158c52cec 100644 --- a/typedapi/types/sharedfilesystemrepository.go +++ b/typedapi/types/sharedfilesystemrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,13 @@ import ( // SharedFileSystemRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L55-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L80-L90 type SharedFileSystemRepository struct { + // Settings The repository settings. Settings SharedFileSystemRepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Type The shared file system repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *SharedFileSystemRepository) UnmarshalJSON(data []byte) error { @@ -92,3 +94,13 @@ func NewSharedFileSystemRepository() *SharedFileSystemRepository { return r } + +// true + +type SharedFileSystemRepositoryVariant interface { + SharedFileSystemRepositoryCaster() *SharedFileSystemRepository +} + +func (s *SharedFileSystemRepository) SharedFileSystemRepositoryCaster() *SharedFileSystemRepository { + return s +} diff --git a/typedapi/types/sharedfilesystemrepositorysettings.go b/typedapi/types/sharedfilesystemrepositorysettings.go index 528560473f..efa8a89f3f 100644 --- a/typedapi/types/sharedfilesystemrepositorysettings.go +++ b/typedapi/types/sharedfilesystemrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,57 @@ import ( // SharedFileSystemRepositorySettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L104-L108 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L348-L375 type SharedFileSystemRepositorySettings struct { - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Compress *bool `json:"compress,omitempty"` - Location string `json:"location"` - MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // Location The location of the shared filesystem used to store and retrieve snapshots. 
+ // This location must be registered in the `path.repo` setting on all master and + // data nodes in the cluster. + // Unlike `path.repo`, this setting supports only a single file path. + Location string `json:"location"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. + // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - Readonly *bool `json:"readonly,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. 
+ Readonly *bool `json:"readonly,omitempty"` } func (s *SharedFileSystemRepositorySettings) UnmarshalJSON(data []byte) error { @@ -139,3 +181,13 @@ func NewSharedFileSystemRepositorySettings() *SharedFileSystemRepositorySettings return r } + +// true + +type SharedFileSystemRepositorySettingsVariant interface { + SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings +} + +func (s *SharedFileSystemRepositorySettings) SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings { + return s +} diff --git a/typedapi/types/shingletokenfilter.go b/typedapi/types/shingletokenfilter.go index 953e28ec5a..340f1f5223 100644 --- a/typedapi/types/shingletokenfilter.go +++ b/typedapi/types/shingletokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShingleTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L86-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L86-L94 type ShingleTokenFilter struct { FillerToken *string `json:"filler_token,omitempty"` MaxShingleSize string `json:"max_shingle_size,omitempty"` @@ -174,3 +174,13 @@ func NewShingleTokenFilter() *ShingleTokenFilter { return r } + +// true + +type ShingleTokenFilterVariant interface { + ShingleTokenFilterCaster() *ShingleTokenFilter +} + +func (s *ShingleTokenFilter) ShingleTokenFilterCaster() *ShingleTokenFilter { + return s +} diff --git a/typedapi/types/shortnumberproperty.go b/typedapi/types/shortnumberproperty.go index 16d29cf6fa..615516a5b3 100644 --- a/typedapi/types/shortnumberproperty.go +++ b/typedapi/types/shortnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ShortNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L167-L170 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L171-L174 type ShortNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type ShortNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *int `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -542,301 +556,313 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -861,6 +887,11 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -909,6 +940,7 @@ func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -922,10 +954,20 @@ func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { // NewShortNumberProperty returns a ShortNumberProperty. func NewShortNumberProperty() *ShortNumberProperty { r := &ShortNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ShortNumberPropertyVariant interface { + ShortNumberPropertyCaster() *ShortNumberProperty +} + +func (s *ShortNumberProperty) ShortNumberPropertyCaster() *ShortNumberProperty { + return s +} diff --git a/typedapi/types/shrinkaction.go b/typedapi/types/shrinkaction.go index 943cf6edbf..517566d728 100644 --- a/typedapi/types/shrinkaction.go +++ b/typedapi/types/shrinkaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ShrinkAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L117-L121 type ShrinkAction struct { AllowWriteAfterShrink *bool `json:"allow_write_after_shrink,omitempty"` MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` @@ -99,3 +99,13 @@ func NewShrinkAction() *ShrinkAction { return r } + +// true + +type ShrinkActionVariant interface { + ShrinkActionCaster() *ShrinkAction +} + +func (s *ShrinkAction) ShrinkActionCaster() *ShrinkAction { + return s +} diff --git a/typedapi/types/significantlongtermsaggregate.go b/typedapi/types/significantlongtermsaggregate.go index 7446a5fd30..4088d97e34 100644 --- a/typedapi/types/significantlongtermsaggregate.go +++ b/typedapi/types/significantlongtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SignificantLongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L668-L670 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L668-L670 type SignificantLongTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantLongTermsBucket `json:"buckets"` @@ -121,3 +121,5 @@ func NewSignificantLongTermsAggregate() *SignificantLongTermsAggregate { return r } + +// false diff --git a/typedapi/types/significantlongtermsbucket.go b/typedapi/types/significantlongtermsbucket.go index 274e723a1c..a10f7c2e74 100644 --- a/typedapi/types/significantlongtermsbucket.go +++ b/typedapi/types/significantlongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // SignificantLongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L677-L680 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L677-L680 type SignificantLongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -687,8 +687,10 @@ func (s SignificantLongTermsBucket) MarshalJSON() ([]byte, error) { // NewSignificantLongTermsBucket returns a SignificantLongTermsBucket. 
func NewSignificantLongTermsBucket() *SignificantLongTermsBucket { r := &SignificantLongTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/significantstringtermsaggregate.go b/typedapi/types/significantstringtermsaggregate.go index 7e3d71c52d..d1b95cc1da 100644 --- a/typedapi/types/significantstringtermsaggregate.go +++ b/typedapi/types/significantstringtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SignificantStringTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L682-L684 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L682-L684 type SignificantStringTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantStringTermsBucket `json:"buckets"` @@ -121,3 +121,5 @@ func NewSignificantStringTermsAggregate() *SignificantStringTermsAggregate { return r } + +// false diff --git a/typedapi/types/significantstringtermsbucket.go b/typedapi/types/significantstringtermsbucket.go index 90a84a0c2e..040a410b98 100644 --- a/typedapi/types/significantstringtermsbucket.go +++ b/typedapi/types/significantstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // SignificantStringTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L686-L688 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L686-L688 type SignificantStringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -671,8 +671,10 @@ func (s SignificantStringTermsBucket) MarshalJSON() ([]byte, error) { // NewSignificantStringTermsBucket returns a SignificantStringTermsBucket. func NewSignificantStringTermsBucket() *SignificantStringTermsBucket { r := &SignificantStringTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go b/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go deleted file mode 100644 index abb9719b3c..0000000000 --- a/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseSignificantLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L658-L666 -type SignificantTermsAggregateBaseSignificantLongTermsBucket struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsSignificantLongTermsBucket `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantLongTermsBucket, 
0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantLongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseSignificantLongTermsBucket returns a SignificantTermsAggregateBaseSignificantLongTermsBucket. -func NewSignificantTermsAggregateBaseSignificantLongTermsBucket() *SignificantTermsAggregateBaseSignificantLongTermsBucket { - r := &SignificantTermsAggregateBaseSignificantLongTermsBucket{} - - return r -} diff --git a/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go b/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go deleted file mode 100644 index 5a839eed8d..0000000000 --- a/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseSignificantStringTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L658-L666 -type SignificantTermsAggregateBaseSignificantStringTermsBucket struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsSignificantStringTermsBucket `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := 
make(map[string]SignificantStringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantStringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseSignificantStringTermsBucket returns a SignificantTermsAggregateBaseSignificantStringTermsBucket. -func NewSignificantTermsAggregateBaseSignificantStringTermsBucket() *SignificantTermsAggregateBaseSignificantStringTermsBucket { - r := &SignificantTermsAggregateBaseSignificantStringTermsBucket{} - - return r -} diff --git a/typedapi/types/significanttermsaggregatebasevoid.go b/typedapi/types/significanttermsaggregatebasevoid.go deleted file mode 100644 index 143b85634c..0000000000 --- a/typedapi/types/significanttermsaggregatebasevoid.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseVoid type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L658-L666 -type SignificantTermsAggregateBaseVoid struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsVoid `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]any, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets 
= o - case '[': - o := []any{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseVoid returns a SignificantTermsAggregateBaseVoid. -func NewSignificantTermsAggregateBaseVoid() *SignificantTermsAggregateBaseVoid { - r := &SignificantTermsAggregateBaseVoid{} - - return r -} diff --git a/typedapi/types/significanttermsaggregation.go b/typedapi/types/significanttermsaggregation.go index 5266dcbc01..f26935f8f2 100644 --- a/typedapi/types/significanttermsaggregation.go +++ b/typedapi/types/significanttermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SignificantTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L817-L884 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L817-L884 type SignificantTermsAggregation struct { // BackgroundFilter A background filter that can be used to focus in on significant terms within // a narrower context, instead of the entire index. 
@@ -263,3 +263,13 @@ func NewSignificantTermsAggregation() *SignificantTermsAggregation { return r } + +// true + +type SignificantTermsAggregationVariant interface { + SignificantTermsAggregationCaster() *SignificantTermsAggregation +} + +func (s *SignificantTermsAggregation) SignificantTermsAggregationCaster() *SignificantTermsAggregation { + return s +} diff --git a/typedapi/types/significanttextaggregation.go b/typedapi/types/significanttextaggregation.go index 4a55cf8c23..05eae48574 100644 --- a/typedapi/types/significanttextaggregation.go +++ b/typedapi/types/significanttextaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SignificantTextAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L886-L961 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L886-L961 type SignificantTextAggregation struct { // BackgroundFilter A background filter that can be used to focus in on significant terms within // a narrower context, instead of the entire index. 
@@ -297,3 +297,13 @@ func NewSignificantTextAggregation() *SignificantTextAggregation { return r } + +// true + +type SignificantTextAggregationVariant interface { + SignificantTextAggregationCaster() *SignificantTextAggregation +} + +func (s *SignificantTextAggregation) SignificantTextAggregationCaster() *SignificantTextAggregation { + return s +} diff --git a/typedapi/types/simpleanalyzer.go b/typedapi/types/simpleanalyzer.go index 4c6083b25a..f465cc5ddd 100644 --- a/typedapi/types/simpleanalyzer.go +++ b/typedapi/types/simpleanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SimpleAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L329-L332 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L367-L371 type SimpleAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewSimpleAnalyzer() *SimpleAnalyzer { return r } + +// true + +type SimpleAnalyzerVariant interface { + SimpleAnalyzerCaster() *SimpleAnalyzer +} + +func (s *SimpleAnalyzer) SimpleAnalyzerCaster() *SimpleAnalyzer { + return s +} diff --git a/typedapi/types/simplemovingaverageaggregation.go b/typedapi/types/simplemovingaverageaggregation.go index e65b1d8c45..01c76434a3 100644 --- a/typedapi/types/simplemovingaverageaggregation.go +++ b/typedapi/types/simplemovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SimpleMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L273-L276 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L273-L276 type SimpleMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewSimpleMovingAverageAggregation() *SimpleMovingAverageAggregation { return r } + +// true + +type SimpleMovingAverageAggregationVariant interface { + SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation +} + +func (s *SimpleMovingAverageAggregation) SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation { + return s +} diff --git a/typedapi/types/simplepatternsplittokenizer.go b/typedapi/types/simplepatternsplittokenizer.go index 83f70d70df..7e246776fc 100644 --- a/typedapi/types/simplepatternsplittokenizer.go +++ b/typedapi/types/simplepatternsplittokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SimplePatternSplitTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L116-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L116-L119 type SimplePatternSplitTokenizer struct { Pattern *string `json:"pattern,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewSimplePatternSplitTokenizer() *SimplePatternSplitTokenizer { return r } + +// true + +type SimplePatternSplitTokenizerVariant interface { + SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer +} + +func (s *SimplePatternSplitTokenizer) SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer { + return s +} diff --git a/typedapi/types/simplepatterntokenizer.go b/typedapi/types/simplepatterntokenizer.go index 1125abc003..5ab819e13e 100644 --- a/typedapi/types/simplepatterntokenizer.go +++ b/typedapi/types/simplepatterntokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SimplePatternTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L111-L114 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L111-L114 type SimplePatternTokenizer struct { Pattern *string `json:"pattern,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewSimplePatternTokenizer() *SimplePatternTokenizer { return r } + +// true + +type SimplePatternTokenizerVariant interface { + SimplePatternTokenizerCaster() *SimplePatternTokenizer +} + +func (s *SimplePatternTokenizer) SimplePatternTokenizerCaster() *SimplePatternTokenizer { + return s +} diff --git a/typedapi/types/simplequerystringflags.go b/typedapi/types/simplequerystringflags.go index 35ab4188a8..af3219e818 100644 --- a/typedapi/types/simplequerystringflags.go +++ b/typedapi/types/simplequerystringflags.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SimpleQueryStringFlags type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L723-L727 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L723-L727 type SimpleQueryStringFlags PipeSeparatedFlagsSimpleQueryStringFlag + +type SimpleQueryStringFlagsVariant interface { + SimpleQueryStringFlagsCaster() *SimpleQueryStringFlags +} diff --git a/typedapi/types/simplequerystringquery.go b/typedapi/types/simplequerystringquery.go index cac10d6cb6..f2b6bf3f1e 100644 --- a/typedapi/types/simplequerystringquery.go +++ b/typedapi/types/simplequerystringquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SimpleQueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/fulltext.ts#L786-L854 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/fulltext.ts#L786-L854 type SimpleQueryStringQuery struct { // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. 
AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` @@ -278,3 +278,13 @@ func NewSimpleQueryStringQuery() *SimpleQueryStringQuery { return r } + +// true + +type SimpleQueryStringQueryVariant interface { + SimpleQueryStringQueryCaster() *SimpleQueryStringQuery +} + +func (s *SimpleQueryStringQuery) SimpleQueryStringQueryCaster() *SimpleQueryStringQuery { + return s +} diff --git a/typedapi/types/simplevalueaggregate.go b/typedapi/types/simplevalueaggregate.go index 3938b2bb0e..a65707ba99 100644 --- a/typedapi/types/simplevalueaggregate.go +++ b/typedapi/types/simplevalueaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SimpleValueAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L238-L239 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L238-L239 type SimpleValueAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewSimpleValueAggregate() *SimpleValueAggregate { return r } + +// false diff --git a/typedapi/types/simulatedactions.go b/typedapi/types/simulatedactions.go index 0bd9b5fc82..b4d3dba336 100644 --- a/typedapi/types/simulatedactions.go +++ b/typedapi/types/simulatedactions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SimulatedActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L90-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L90-L94 type SimulatedActions struct { Actions []string `json:"actions"` All *SimulatedActions `json:"all,omitempty"` @@ -88,3 +88,13 @@ func NewSimulatedActions() *SimulatedActions { return r } + +// true + +type SimulatedActionsVariant interface { + SimulatedActionsCaster() *SimulatedActions +} + +func (s *SimulatedActions) SimulatedActionsCaster() *SimulatedActions { + return s +} diff --git a/typedapi/types/simulatedocumentresult.go b/typedapi/types/simulatedocumentresult.go index ffdd32ce2d..2910faf0b4 100644 --- a/typedapi/types/simulatedocumentresult.go +++ b/typedapi/types/simulatedocumentresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SimulateDocumentResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/simulate/types.ts#L46-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Simulation.ts#L46-L50 type SimulateDocumentResult struct { Doc *DocumentSimulation `json:"doc,omitempty"` Error *ErrorCause `json:"error,omitempty"` @@ -35,3 +35,5 @@ func NewSimulateDocumentResult() *SimulateDocumentResult { return r } + +// false diff --git a/typedapi/types/simulateingestdocumentresult.go b/typedapi/types/simulateingestdocumentresult.go new file mode 100644 index 0000000000..57b1c36111 --- /dev/null +++ b/typedapi/types/simulateingestdocumentresult.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// SimulateIngestDocumentResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/simulate/ingest/SimulateIngestResponse.ts#L31-L33 +type SimulateIngestDocumentResult struct { + Doc *IngestDocumentSimulation `json:"doc,omitempty"` +} + +// NewSimulateIngestDocumentResult returns a SimulateIngestDocumentResult. +func NewSimulateIngestDocumentResult() *SimulateIngestDocumentResult { + r := &SimulateIngestDocumentResult{} + + return r +} + +// false diff --git a/typedapi/types/sizefield.go b/typedapi/types/sizefield.go index 51e388da29..34bc08475d 100644 --- a/typedapi/types/sizefield.go +++ b/typedapi/types/sizefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SizeField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L54-L56 type SizeField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewSizeField() *SizeField { return r } + +// true + +type SizeFieldVariant interface { + SizeFieldCaster() *SizeField +} + +func (s *SizeField) SizeFieldCaster() *SizeField { + return s +} diff --git a/typedapi/types/sizehttphistogram.go b/typedapi/types/sizehttphistogram.go index a78b14913c..816d672b17 100644 --- a/typedapi/types/sizehttphistogram.go +++ b/typedapi/types/sizehttphistogram.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SizeHttpHistogram type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L714-L718 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L714-L718 type SizeHttpHistogram struct { Count int64 `json:"count"` GeBytes *int64 `json:"ge_bytes,omitempty"` @@ -109,3 +109,5 @@ func NewSizeHttpHistogram() *SizeHttpHistogram { return r } + +// false diff --git a/typedapi/types/slackaction.go b/typedapi/types/slackaction.go index 1ce2fe1cda..ec59812833 100644 --- a/typedapi/types/slackaction.go +++ b/typedapi/types/slackaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L91-L94 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L91-L94 type SlackAction struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` @@ -80,3 +80,13 @@ func NewSlackAction() *SlackAction { return r } + +// true + +type SlackActionVariant interface { + SlackActionCaster() *SlackAction +} + +func (s *SlackAction) SlackActionCaster() *SlackAction { + return s +} diff --git a/typedapi/types/slackattachment.go b/typedapi/types/slackattachment.go index 99ab1f4036..b58c749746 100644 --- a/typedapi/types/slackattachment.go +++ b/typedapi/types/slackattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L101-L117 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L101-L117 type SlackAttachment struct { AuthorIcon *string `json:"author_icon,omitempty"` AuthorLink *string `json:"author_link,omitempty"` @@ -242,3 +242,13 @@ func NewSlackAttachment() *SlackAttachment { return r } + +// true + +type SlackAttachmentVariant interface { + SlackAttachmentCaster() *SlackAttachment +} + +func (s *SlackAttachment) SlackAttachmentCaster() *SlackAttachment { + return s +} diff --git a/typedapi/types/slackattachmentfield.go b/typedapi/types/slackattachmentfield.go index 0e639447b3..dc72353f1a 100644 --- a/typedapi/types/slackattachmentfield.go +++ b/typedapi/types/slackattachmentfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackAttachmentField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L119-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L119-L123 type SlackAttachmentField struct { Int bool `json:"short"` Title string `json:"title"` @@ -102,3 +102,13 @@ func NewSlackAttachmentField() *SlackAttachmentField { return r } + +// true + +type SlackAttachmentFieldVariant interface { + SlackAttachmentFieldCaster() *SlackAttachmentField +} + +func (s *SlackAttachmentField) SlackAttachmentFieldCaster() *SlackAttachmentField { + return s +} diff --git a/typedapi/types/slackdynamicattachment.go b/typedapi/types/slackdynamicattachment.go index 45b04938c5..a1ae2b20fd 100644 --- a/typedapi/types/slackdynamicattachment.go +++ b/typedapi/types/slackdynamicattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackDynamicAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L125-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L125-L128 type SlackDynamicAttachment struct { AttachmentTemplate SlackAttachment `json:"attachment_template"` ListPath string `json:"list_path"` @@ -80,3 +80,13 @@ func NewSlackDynamicAttachment() *SlackDynamicAttachment { return r } + +// true + +type SlackDynamicAttachmentVariant interface { + SlackDynamicAttachmentCaster() *SlackDynamicAttachment +} + +func (s *SlackDynamicAttachment) SlackDynamicAttachmentCaster() *SlackDynamicAttachment { + return s +} diff --git a/typedapi/types/slackmessage.go b/typedapi/types/slackmessage.go index 409c4a9623..9657d667da 100644 --- a/typedapi/types/slackmessage.go +++ b/typedapi/types/slackmessage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackMessage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L130-L137 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L130-L137 type SlackMessage struct { Attachments []SlackAttachment `json:"attachments"` DynamicAttachments *SlackDynamicAttachment `json:"dynamic_attachments,omitempty"` @@ -118,3 +118,13 @@ func NewSlackMessage() *SlackMessage { return r } + +// true + +type SlackMessageVariant interface { + SlackMessageCaster() *SlackMessage +} + +func (s *SlackMessage) SlackMessageCaster() *SlackMessage { + return s +} diff --git a/typedapi/types/slackresult.go b/typedapi/types/slackresult.go index 3935497a8b..e7579a0794 100644 --- a/typedapi/types/slackresult.go +++ b/typedapi/types/slackresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlackResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L96-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L96-L99 type SlackResult struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` @@ -80,3 +80,5 @@ func NewSlackResult() *SlackResult { return r } + +// false diff --git a/typedapi/types/slicedscroll.go b/typedapi/types/slicedscroll.go index abc8b936f3..86afb731bf 100644 --- a/typedapi/types/slicedscroll.go +++ b/typedapi/types/slicedscroll.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlicedScroll type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/SlicedScroll.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/SlicedScroll.ts#L23-L27 type SlicedScroll struct { Field *string `json:"field,omitempty"` Id string `json:"id"` @@ -90,3 +90,13 @@ func NewSlicedScroll() *SlicedScroll { return r } + +// true + +type SlicedScrollVariant interface { + SlicedScrollCaster() *SlicedScroll +} + +func (s *SlicedScroll) SlicedScrollCaster() *SlicedScroll { + return s +} diff --git a/typedapi/types/slices.go b/typedapi/types/slices.go index 5414b208fd..39bf68c34e 100644 --- a/typedapi/types/slices.go +++ b/typedapi/types/slices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // int // slicescalculation.SlicesCalculation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L365-L370 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L361-L366 type Slices any diff --git a/typedapi/types/slm.go b/typedapi/types/slm.go index 68bd39dd02..76bf0d3573 100644 --- a/typedapi/types/slm.go +++ b/typedapi/types/slm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Slm type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L447-L450 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L455-L458 type Slm struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -114,3 +114,5 @@ func NewSlm() *Slm { return r } + +// false diff --git a/typedapi/types/slmindicator.go b/typedapi/types/slmindicator.go index ef5fe594a5..e5ed7b50c2 100644 --- a/typedapi/types/slmindicator.go +++ b/typedapi/types/slmindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SlmIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L174-L178 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L175-L179 type SlmIndicator struct { Details *SlmIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewSlmIndicator() *SlmIndicator { return r } + +// false diff --git a/typedapi/types/slmindicatordetails.go b/typedapi/types/slmindicatordetails.go index 0ef5ebb884..f8b38e1ed3 100644 --- a/typedapi/types/slmindicatordetails.go +++ b/typedapi/types/slmindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SlmIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L179-L183 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L180-L184 type SlmIndicatorDetails struct { Policies int64 `json:"policies"` SlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"slm_status"` @@ -91,3 +91,5 @@ func NewSlmIndicatorDetails() *SlmIndicatorDetails { return r } + +// false diff --git a/typedapi/types/slmindicatorunhealthypolicies.go b/typedapi/types/slmindicatorunhealthypolicies.go index be952d9080..02eeec730f 100644 --- a/typedapi/types/slmindicatorunhealthypolicies.go +++ b/typedapi/types/slmindicatorunhealthypolicies.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlmIndicatorUnhealthyPolicies type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L185-L188 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L186-L189 type SlmIndicatorUnhealthyPolicies struct { Count int64 `json:"count"` InvocationsSinceLastSuccess map[string]int64 `json:"invocations_since_last_success,omitempty"` @@ -83,8 +83,10 @@ func (s *SlmIndicatorUnhealthyPolicies) UnmarshalJSON(data []byte) error { // NewSlmIndicatorUnhealthyPolicies returns a SlmIndicatorUnhealthyPolicies. 
func NewSlmIndicatorUnhealthyPolicies() *SlmIndicatorUnhealthyPolicies { r := &SlmIndicatorUnhealthyPolicies{ - InvocationsSinceLastSuccess: make(map[string]int64, 0), + InvocationsSinceLastSuccess: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/slmpolicy.go b/typedapi/types/slmpolicy.go index 5a87a15d80..6e8aecfec7 100644 --- a/typedapi/types/slmpolicy.go +++ b/typedapi/types/slmpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SLMPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L76-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L86-L92 type SLMPolicy struct { Config *Configuration `json:"config,omitempty"` Name string `json:"name"` @@ -98,3 +98,5 @@ func NewSLMPolicy() *SLMPolicy { return r } + +// false diff --git a/typedapi/types/slowlogsettings.go b/typedapi/types/slowlogsettings.go index 4caed20004..de09c6cda8 100644 --- a/typedapi/types/slowlogsettings.go +++ b/typedapi/types/slowlogsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SlowlogSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L490-L495 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L515-L520 type SlowlogSettings struct { Level *string `json:"level,omitempty"` Reformat *bool `json:"reformat,omitempty"` @@ -112,3 +112,13 @@ func NewSlowlogSettings() *SlowlogSettings { return r } + +// true + +type SlowlogSettingsVariant interface { + SlowlogSettingsCaster() *SlowlogSettings +} + +func (s *SlowlogSettings) SlowlogSettingsCaster() *SlowlogSettings { + return s +} diff --git a/typedapi/types/slowlogtresholdlevels.go b/typedapi/types/slowlogtresholdlevels.go index 7e94761972..fb84c20b2b 100644 --- a/typedapi/types/slowlogtresholdlevels.go +++ b/typedapi/types/slowlogtresholdlevels.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SlowlogTresholdLevels type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L502-L507 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L527-L532 type SlowlogTresholdLevels struct { Debug Duration `json:"debug,omitempty"` Info Duration `json:"info,omitempty"` @@ -84,3 +84,13 @@ func NewSlowlogTresholdLevels() *SlowlogTresholdLevels { return r } + +// true + +type SlowlogTresholdLevelsVariant interface { + SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels +} + +func (s *SlowlogTresholdLevels) SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels { + return s +} diff --git a/typedapi/types/slowlogtresholds.go b/typedapi/types/slowlogtresholds.go index 707bbca80f..d20efbe6cb 100644 --- a/typedapi/types/slowlogtresholds.go +++ b/typedapi/types/slowlogtresholds.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SlowlogTresholds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L497-L500 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L522-L525 type SlowlogTresholds struct { Fetch *SlowlogTresholdLevels `json:"fetch,omitempty"` Query *SlowlogTresholdLevels `json:"query,omitempty"` @@ -34,3 +34,13 @@ func NewSlowlogTresholds() *SlowlogTresholds { return r } + +// true + +type SlowlogTresholdsVariant interface { + SlowlogTresholdsCaster() *SlowlogTresholds +} + +func (s *SlowlogTresholds) SlowlogTresholdsCaster() *SlowlogTresholds { + return s +} diff --git a/typedapi/types/smoothingmodelcontainer.go b/typedapi/types/smoothingmodelcontainer.go index be5525d0ba..e65bf94f3c 100644 --- a/typedapi/types/smoothingmodelcontainer.go +++ b/typedapi/types/smoothingmodelcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // SmoothingModelContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L445-L461 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L445-L461 type SmoothingModelContainer struct { + AdditionalSmoothingModelContainerProperty map[string]json.RawMessage `json:"-"` // Laplace A smoothing model that uses an additive smoothing where a constant (typically // `1.0` or smaller) is added to all counts to balance weights. 
Laplace *LaplaceSmoothingModel `json:"laplace,omitempty"` @@ -36,9 +42,50 @@ type SmoothingModelContainer struct { StupidBackoff *StupidBackoffSmoothingModel `json:"stupid_backoff,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s SmoothingModelContainer) MarshalJSON() ([]byte, error) { + type opt SmoothingModelContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSmoothingModelContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSmoothingModelContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSmoothingModelContainer returns a SmoothingModelContainer. func NewSmoothingModelContainer() *SmoothingModelContainer { - r := &SmoothingModelContainer{} + r := &SmoothingModelContainer{ + AdditionalSmoothingModelContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type SmoothingModelContainerVariant interface { + SmoothingModelContainerCaster() *SmoothingModelContainer +} + +func (s *SmoothingModelContainer) SmoothingModelContainerCaster() *SmoothingModelContainer { + return s +} diff --git a/typedapi/types/snapshotindexstats.go b/typedapi/types/snapshotindexstats.go index 5f490111d0..f5ec9b53aa 100644 --- a/typedapi/types/snapshotindexstats.go +++ b/typedapi/types/snapshotindexstats.go @@ -16,13 +16,13 @@ // under the License.
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SnapshotIndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 type SnapshotIndexStats struct { Shards map[string]SnapshotShardsStatus `json:"shards"` ShardsStats SnapshotShardsStats `json:"shards_stats"` @@ -32,8 +32,10 @@ type SnapshotIndexStats struct { // NewSnapshotIndexStats returns a SnapshotIndexStats. func NewSnapshotIndexStats() *SnapshotIndexStats { r := &SnapshotIndexStats{ - Shards: make(map[string]SnapshotShardsStatus, 0), + Shards: make(map[string]SnapshotShardsStatus), } return r } + +// false diff --git a/typedapi/types/snapshotinfo.go b/typedapi/types/snapshotinfo.go index efac76be6f..3fa2d0da48 100644 --- a/typedapi/types/snapshotinfo.go +++ b/typedapi/types/snapshotinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SnapshotInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotInfo.ts#L41-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotInfo.ts#L41-L71 type SnapshotInfo struct { DataStreams []string `json:"data_streams"` Duration Duration `json:"duration,omitempty"` @@ -210,8 +210,10 @@ func (s *SnapshotInfo) UnmarshalJSON(data []byte) error { // NewSnapshotInfo returns a SnapshotInfo. func NewSnapshotInfo() *SnapshotInfo { r := &SnapshotInfo{ - IndexDetails: make(map[string]IndexDetails, 0), + IndexDetails: make(map[string]IndexDetails), } return r } + +// false diff --git a/typedapi/types/snapshotlifecycle.go b/typedapi/types/snapshotlifecycle.go index c339a5a6d3..12a66d583a 100644 --- a/typedapi/types/snapshotlifecycle.go +++ b/typedapi/types/snapshotlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,18 +30,22 @@ import ( // SnapshotLifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L38-L49 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L38-L59 type SnapshotLifecycle struct { - InProgress *InProgress `json:"in_progress,omitempty"` - LastFailure *Invocation `json:"last_failure,omitempty"` - LastSuccess *Invocation `json:"last_success,omitempty"` - ModifiedDate DateTime `json:"modified_date,omitempty"` - ModifiedDateMillis int64 `json:"modified_date_millis"` - NextExecution DateTime `json:"next_execution,omitempty"` - NextExecutionMillis int64 `json:"next_execution_millis"` - Policy SLMPolicy `json:"policy"` - Stats Statistics `json:"stats"` - Version int64 `json:"version"` + InProgress *InProgress `json:"in_progress,omitempty"` + LastFailure *Invocation `json:"last_failure,omitempty"` + LastSuccess *Invocation `json:"last_success,omitempty"` + // ModifiedDate The last time the policy was modified. + ModifiedDate DateTime `json:"modified_date,omitempty"` + ModifiedDateMillis int64 `json:"modified_date_millis"` + // NextExecution The next time the policy will run. + NextExecution DateTime `json:"next_execution,omitempty"` + NextExecutionMillis int64 `json:"next_execution_millis"` + Policy SLMPolicy `json:"policy"` + Stats Statistics `json:"stats"` + // Version The version of the snapshot policy. + // Only the latest version is stored and incremented when the policy is updated. 
+ Version int64 `json:"version"` } func (s *SnapshotLifecycle) UnmarshalJSON(data []byte) error { @@ -120,3 +124,5 @@ func NewSnapshotLifecycle() *SnapshotLifecycle { return r } + +// false diff --git a/typedapi/types/queryruleset.go b/typedapi/types/snapshotnodeinfo.go similarity index 57% rename from typedapi/types/queryruleset.go rename to typedapi/types/snapshotnodeinfo.go index 82e96590cc..667b4c4dad 100644 --- a/typedapi/types/queryruleset.go +++ b/typedapi/types/snapshotnodeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,17 +28,15 @@ import ( "io" ) -// QueryRuleset type. +// SnapshotNodeInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/query_rules/_types/QueryRuleset.ts#L25-L34 -type QueryRuleset struct { - // Rules Rules associated with the query ruleset - Rules []QueryRule `json:"rules"` - // RulesetId Query Ruleset unique identifier - RulesetId string `json:"ruleset_id"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L110-L113 +type SnapshotNodeInfo struct { + Id string `json:"id"` + Name string `json:"name"` } -func (s *QueryRuleset) UnmarshalJSON(data []byte) error { +func (s *SnapshotNodeInfo) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -53,14 +51,14 @@ func (s *QueryRuleset) UnmarshalJSON(data []byte) error { switch t { - case "rules": - if err := dec.Decode(&s.Rules); err != nil { - return fmt.Errorf("%s | %w", "Rules", err) + case "id": + if err := dec.Decode(&s.Id); err != nil { + return 
fmt.Errorf("%s | %w", "Id", err) } - case "ruleset_id": - if err := dec.Decode(&s.RulesetId); err != nil { - return fmt.Errorf("%s | %w", "RulesetId", err) + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) } } @@ -68,9 +66,11 @@ func (s *QueryRuleset) UnmarshalJSON(data []byte) error { return nil } -// NewQueryRuleset returns a QueryRuleset. -func NewQueryRuleset() *QueryRuleset { - r := &QueryRuleset{} +// NewSnapshotNodeInfo returns a SnapshotNodeInfo. +func NewSnapshotNodeInfo() *SnapshotNodeInfo { + r := &SnapshotNodeInfo{} return r } + +// false diff --git a/typedapi/types/snapshotresponseitem.go b/typedapi/types/snapshotresponseitem.go index cba6fbfdf3..2c4c4be515 100644 --- a/typedapi/types/snapshotresponseitem.go +++ b/typedapi/types/snapshotresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SnapshotResponseItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/get/SnapshotGetResponse.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/get/SnapshotGetResponse.ts#L49-L53 type SnapshotResponseItem struct { Error *ErrorCause `json:"error,omitempty"` Repository string `json:"repository"` @@ -78,3 +78,5 @@ func NewSnapshotResponseItem() *SnapshotResponseItem { return r } + +// false diff --git a/typedapi/types/snapshotrestore.go b/typedapi/types/snapshotrestore.go index e29ca5baf9..aaf1c3215d 100644 --- a/typedapi/types/snapshotrestore.go +++ b/typedapi/types/snapshotrestore.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SnapshotRestore type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/restore/SnapshotRestoreResponse.ts#L30-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/restore/SnapshotRestoreResponse.ts#L30-L34 type SnapshotRestore struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` @@ -86,3 +86,5 @@ func NewSnapshotRestore() *SnapshotRestore { return r } + +// false diff --git a/typedapi/types/snapshotshardfailure.go b/typedapi/types/snapshotshardfailure.go index 9e66d1a68b..487c01e21f 100644 --- a/typedapi/types/snapshotshardfailure.go +++ b/typedapi/types/snapshotshardfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SnapshotShardFailure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L29 type SnapshotShardFailure struct { Index string `json:"index"` IndexUuid string `json:"index_uuid"` @@ -111,3 +111,5 @@ func NewSnapshotShardFailure() *SnapshotShardFailure { return r } + +// false diff --git a/typedapi/types/snapshotshardsstats.go b/typedapi/types/snapshotshardsstats.go index b65afa20f6..cad5cb0462 100644 --- a/typedapi/types/snapshotshardsstats.go +++ b/typedapi/types/snapshotshardsstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,14 +31,20 @@ import ( // SnapshotShardsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L47 type SnapshotShardsStats struct { - Done int64 `json:"done"` - Failed int64 `json:"failed"` - Finalizing int64 `json:"finalizing"` + // Done The number of shards that initialized, started, and finalized successfully. + Done int64 `json:"done"` + // Failed The number of shards that failed to be included in the snapshot. + Failed int64 `json:"failed"` + // Finalizing The number of shards that are finalizing but are not done. 
+ Finalizing int64 `json:"finalizing"` + // Initializing The number of shards that are still initializing. Initializing int64 `json:"initializing"` - Started int64 `json:"started"` - Total int64 `json:"total"` + // Started The number of shards that have started but are not finalized. + Started int64 `json:"started"` + // Total The total number of shards included in the snapshot. + Total int64 `json:"total"` } func (s *SnapshotShardsStats) UnmarshalJSON(data []byte) error { @@ -157,3 +163,5 @@ func NewSnapshotShardsStats() *SnapshotShardsStats { return r } + +// false diff --git a/typedapi/types/snapshotshardsstatus.go b/typedapi/types/snapshotshardsstatus.go index e01e6bd474..f72f245bef 100644 --- a/typedapi/types/snapshotshardsstatus.go +++ b/typedapi/types/snapshotshardsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // SnapshotShardsStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 type SnapshotShardsStatus struct { Stage shardsstatsstage.ShardsStatsStage `json:"stage"` Stats ShardsStatsSummary `json:"stats"` @@ -38,3 +38,5 @@ func NewSnapshotShardsStatus() *SnapshotShardsStatus { return r } + +// false diff --git a/typedapi/types/snapshotsrecord.go b/typedapi/types/snapshotsrecord.go index 1aeeb77a9d..ebe2c1e792 100644 --- a/typedapi/types/snapshotsrecord.go +++ b/typedapi/types/snapshotsrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SnapshotsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/snapshots/types.ts#L24-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/snapshots/types.ts#L24-L96 type SnapshotsRecord struct { // Duration The time it took the snapshot process to complete, in time units. Duration Duration `json:"duration,omitempty"` @@ -248,3 +248,5 @@ func NewSnapshotsRecord() *SnapshotsRecord { return r } + +// false diff --git a/typedapi/types/snapshotstats.go b/typedapi/types/snapshotstats.go index afe2cbf00a..2f5b96e51e 100644 --- a/typedapi/types/snapshotstats.go +++ b/typedapi/types/snapshotstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,13 +30,22 @@ import ( // SnapshotStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotStats.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotStats.ts#L23-L42 type SnapshotStats struct { - Incremental FileCountSnapshotStats `json:"incremental"` - StartTimeInMillis int64 `json:"start_time_in_millis"` - Time Duration `json:"time,omitempty"` - TimeInMillis int64 `json:"time_in_millis"` - Total FileCountSnapshotStats `json:"total"` + // Incremental The number and size of files that still need to be copied as part of the + // incremental snapshot. + // For completed snapshots, this property indicates the number and size of files + // that were not already in the repository and were copied as part of the + // incremental snapshot. + Incremental FileCountSnapshotStats `json:"incremental"` + // StartTimeInMillis The time, in milliseconds, when the snapshot creation process started. + StartTimeInMillis int64 `json:"start_time_in_millis"` + Time Duration `json:"time,omitempty"` + // TimeInMillis The total time, in milliseconds, that it took for the snapshot process to + // complete. + TimeInMillis int64 `json:"time_in_millis"` + // Total The total number and size of files that are referenced by the snapshot. + Total FileCountSnapshotStats `json:"total"` } func (s *SnapshotStats) UnmarshalJSON(data []byte) error { @@ -90,3 +99,5 @@ func NewSnapshotStats() *SnapshotStats { return r } + +// false diff --git a/typedapi/types/snowballanalyzer.go b/typedapi/types/snowballanalyzer.go index 943863af18..e5f63c1841 100644 --- a/typedapi/types/snowballanalyzer.go +++ b/typedapi/types/snowballanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // SnowballAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L334-L339 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L374-L380 type SnowballAnalyzer struct { Language snowballlanguage.SnowballLanguage `json:"language"` Stopwords []string `json:"stopwords,omitempty"` @@ -112,3 +112,13 @@ func NewSnowballAnalyzer() *SnowballAnalyzer { return r } + +// true + +type SnowballAnalyzerVariant interface { + SnowballAnalyzerCaster() *SnowballAnalyzer +} + +func (s *SnowballAnalyzer) SnowballAnalyzerCaster() *SnowballAnalyzer { + return s +} diff --git a/typedapi/types/snowballtokenfilter.go b/typedapi/types/snowballtokenfilter.go index 3a742e42c1..087243dd90 100644 --- a/typedapi/types/snowballtokenfilter.go +++ b/typedapi/types/snowballtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // SnowballTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L310-L313 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L310-L313 type SnowballTokenFilter struct { Language *snowballlanguage.SnowballLanguage `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -94,3 +94,13 @@ func NewSnowballTokenFilter() *SnowballTokenFilter { return r } + +// true + +type SnowballTokenFilterVariant interface { + SnowballTokenFilterCaster() *SnowballTokenFilter +} + +func (s *SnowballTokenFilter) SnowballTokenFilterCaster() *SnowballTokenFilter { + return s +} diff --git a/typedapi/types/softdeletes.go b/typedapi/types/softdeletes.go index ade9531323..2fc5e7e0e6 100644 --- a/typedapi/types/softdeletes.go +++ b/typedapi/types/softdeletes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SoftDeletes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L51-L64 type SoftDeletes struct { // Enabled Indicates whether soft deletes are enabled on the index. 
Enabled *bool `json:"enabled,omitempty"` @@ -90,3 +90,13 @@ func NewSoftDeletes() *SoftDeletes { return r } + +// true + +type SoftDeletesVariant interface { + SoftDeletesCaster() *SoftDeletes +} + +func (s *SoftDeletes) SoftDeletesCaster() *SoftDeletes { + return s +} diff --git a/typedapi/types/soranianalyzer.go b/typedapi/types/soranianalyzer.go index 15b8103855..5603dc7e63 100644 --- a/typedapi/types/soranianalyzer.go +++ b/typedapi/types/soranianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SoraniAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L278-L283 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L289-L294 type SoraniAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewSoraniAnalyzer() *SoraniAnalyzer { return r } + +// true + +type SoraniAnalyzerVariant interface { + SoraniAnalyzerCaster() *SoraniAnalyzer +} + +func (s *SoraniAnalyzer) SoraniAnalyzerCaster() *SoraniAnalyzer { + return s +} diff --git a/typedapi/types/sort.go b/typedapi/types/sort.go index 7508161ee8..3e3483150a 100644 --- a/typedapi/types/sort.go +++ b/typedapi/types/sort.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Sort type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L104-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L104-L104 type Sort []SortCombinations + +type SortVariant interface { + SortCaster() *Sort +} diff --git a/typedapi/types/sortcombinations.go b/typedapi/types/sortcombinations.go index 6837dad0ce..e724583006 100644 --- a/typedapi/types/sortcombinations.go +++ b/typedapi/types/sortcombinations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // SortOptions // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L98-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L98-L102 type SortCombinations any + +type SortCombinationsVariant interface { + SortCombinationsCaster() *SortCombinations +} diff --git a/typedapi/types/sortoptions.go b/typedapi/types/sortoptions.go index f63c13aa15..39ac335508 100644 --- a/typedapi/types/sortoptions.go +++ b/typedapi/types/sortoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -27,7 +27,7 @@ import ( // SortOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/sort.ts#L86-L96 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/sort.ts#L86-L96 type SortOptions struct { Doc_ *ScoreSort `json:"_doc,omitempty"` GeoDistance_ *GeoDistanceSort `json:"_geo_distance,omitempty"` @@ -68,8 +68,18 @@ func (s SortOptions) MarshalJSON() ([]byte, error) { // NewSortOptions returns a SortOptions. func NewSortOptions() *SortOptions { r := &SortOptions{ - SortOptions: make(map[string]FieldSort, 0), + SortOptions: make(map[string]FieldSort), } return r } + +// true + +type SortOptionsVariant interface { + SortOptionsCaster() *SortOptions +} + +func (s *SortOptions) SortOptionsCaster() *SortOptions { + return s +} diff --git a/typedapi/types/sortprocessor.go b/typedapi/types/sortprocessor.go index 1ef481b2f1..938fb6d8e1 100644 --- a/typedapi/types/sortprocessor.go +++ b/typedapi/types/sortprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SortProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1438-L1454 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1479-L1495 type SortProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -41,7 +41,7 @@ type SortProcessor struct { // Field The field to be sorted. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. @@ -90,16 +90,9 @@ func (s *SortProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -153,3 +146,13 @@ func NewSortProcessor() *SortProcessor { return r } + +// true + +type SortProcessorVariant interface { + SortProcessorCaster() *SortProcessor +} + +func (s *SortProcessor) SortProcessorCaster() *SortProcessor { + return s +} diff --git a/typedapi/types/sourceconfig.go b/typedapi/types/sourceconfig.go index d8b76dc1db..2f52c8d407 100644 --- a/typedapi/types/sourceconfig.go +++ b/typedapi/types/sourceconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // bool // SourceFilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/SourceFilter.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/SourceFilter.ts#L33-L37 type SourceConfig any + +type SourceConfigVariant interface { + SourceConfigCaster() *SourceConfig +} diff --git a/typedapi/types/sourceconfigparam.go b/typedapi/types/sourceconfigparam.go index 0c03636715..b4a2039af2 100644 --- a/typedapi/types/sourceconfigparam.go +++ b/typedapi/types/sourceconfigparam.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // bool // []string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/SourceFilter.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/SourceFilter.ts#L39-L45 type SourceConfigParam any diff --git a/typedapi/types/sourcefield.go b/typedapi/types/sourcefield.go index 20e1f67c0f..2f3dcdaddb 100644 --- a/typedapi/types/sourcefield.go +++ b/typedapi/types/sourcefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SourceField type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/meta-fields.ts#L58-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/meta-fields.ts#L58-L65 type SourceField struct { Compress *bool `json:"compress,omitempty"` CompressThreshold *string `json:"compress_threshold,omitempty"` @@ -124,3 +124,13 @@ func NewSourceField() *SourceField { return r } + +// true + +type SourceFieldVariant interface { + SourceFieldCaster() *SourceField +} + +func (s *SourceField) SourceFieldCaster() *SourceField { + return s +} diff --git a/typedapi/types/sourcefilter.go b/typedapi/types/sourcefilter.go index 0933863c5f..d61d0b9527 100644 --- a/typedapi/types/sourcefilter.go +++ b/typedapi/types/sourcefilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SourceFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/SourceFilter.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/SourceFilter.ts#L23-L31 type SourceFilter struct { Excludes []string `json:"excludes,omitempty"` Includes []string `json:"includes,omitempty"` @@ -104,3 +104,13 @@ func NewSourceFilter() *SourceFilter { return r } + +// true + +type SourceFilterVariant interface { + SourceFilterCaster() *SourceFilter +} + +func (s *SourceFilter) SourceFilterCaster() *SourceFilter { + return s +} diff --git a/typedapi/types/bucketpathaggregation.go b/typedapi/types/sourceindex.go similarity index 58% rename from typedapi/types/bucketpathaggregation.go rename to typedapi/types/sourceindex.go index 4db3ad753e..e753a9f478 100644 --- a/typedapi/types/bucketpathaggregation.go +++ b/typedapi/types/sourceindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,15 +28,14 @@ import ( "io" ) -// BucketPathAggregation type. +// SourceIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L31-L37 -type BucketPathAggregation struct { - // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L50-L52 +type SourceIndex struct { + Index string `json:"index"` } -func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { +func (s *SourceIndex) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,9 +50,9 @@ func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { switch t { - case "buckets_path": - if err := dec.Decode(&s.BucketsPath); err != nil { - return fmt.Errorf("%s | %w", "BucketsPath", err) + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) } } @@ -61,9 +60,19 @@ func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { return nil } -// NewBucketPathAggregation returns a BucketPathAggregation. -func NewBucketPathAggregation() *BucketPathAggregation { - r := &BucketPathAggregation{} +// NewSourceIndex returns a SourceIndex. +func NewSourceIndex() *SourceIndex { + r := &SourceIndex{} return r } + +// true + +type SourceIndexVariant interface { + SourceIndexCaster() *SourceIndex +} + +func (s *SourceIndex) SourceIndexCaster() *SourceIndex { + return s +} diff --git a/typedapi/types/sourceonlyrepository.go b/typedapi/types/sourceonlyrepository.go index b9b878285a..75f14ebf8c 100644 --- a/typedapi/types/sourceonlyrepository.go +++ b/typedapi/types/sourceonlyrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,11 +30,13 @@ import ( // SourceOnlyRepository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L65-L68 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L104-L114 type SourceOnlyRepository struct { + // Settings The repository settings. Settings SourceOnlyRepositorySettings `json:"settings"` - Type string `json:"type,omitempty"` - Uuid *string `json:"uuid,omitempty"` + // Type The source-only repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` } func (s *SourceOnlyRepository) UnmarshalJSON(data []byte) error { @@ -92,3 +94,13 @@ func NewSourceOnlyRepository() *SourceOnlyRepository { return r } + +// true + +type SourceOnlyRepositoryVariant interface { + SourceOnlyRepositoryCaster() *SourceOnlyRepository +} + +func (s *SourceOnlyRepository) SourceOnlyRepositoryCaster() *SourceOnlyRepository { + return s +} diff --git a/typedapi/types/sourceonlyrepositorysettings.go b/typedapi/types/sourceonlyrepositorysettings.go index d5ebb0504a..e31385f58f 100644 --- a/typedapi/types/sourceonlyrepositorysettings.go +++ b/typedapi/types/sourceonlyrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,57 @@ import ( // SourceOnlyRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotRepository.ts#L117-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotRepository.ts#L414-L441 type SourceOnlyRepositorySettings struct { - ChunkSize ByteSize `json:"chunk_size,omitempty"` - Compress *bool `json:"compress,omitempty"` - DelegateType *string `json:"delegate_type,omitempty"` - MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` - MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // DelegateType The delegated repository type. For valid values, refer to the `type` + // parameter. + // Source repositories can use `settings` properties for its delegated + // repository type. + DelegateType *string `json:"delegate_type,omitempty"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. 
+ // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` - ReadOnly *bool `json:"read_only,omitempty"` + // ReadOnly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. 
+ ReadOnly *bool `json:"read_only,omitempty"` } func (s *SourceOnlyRepositorySettings) UnmarshalJSON(data []byte) error { @@ -139,3 +181,13 @@ func NewSourceOnlyRepositorySettings() *SourceOnlyRepositorySettings { return r } + +// true + +type SourceOnlyRepositorySettingsVariant interface { + SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings +} + +func (s *SourceOnlyRepositorySettings) SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings { + return s +} diff --git a/typedapi/types/spancontainingquery.go b/typedapi/types/spancontainingquery.go index 4455854232..2967ac6082 100644 --- a/typedapi/types/spancontainingquery.go +++ b/typedapi/types/spancontainingquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,11 @@ import ( // SpanContainingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L25-L39 type SpanContainingQuery struct { // Big Can be any span query. // Matching spans from `big` that contain matches from `little` are returned. - Big *SpanQuery `json:"big,omitempty"` + Big SpanQuery `json:"big"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -44,8 +44,8 @@ type SpanContainingQuery struct { Boost *float32 `json:"boost,omitempty"` // Little Can be any span query. // Matching spans from `big` that contain matches from `little` are returned. 
- Little *SpanQuery `json:"little,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanContainingQuery) UnmarshalJSON(data []byte) error { @@ -112,3 +112,13 @@ func NewSpanContainingQuery() *SpanContainingQuery { return r } + +// true + +type SpanContainingQueryVariant interface { + SpanContainingQueryCaster() *SpanContainingQuery +} + +func (s *SpanContainingQuery) SpanContainingQueryCaster() *SpanContainingQuery { + return s +} diff --git a/typedapi/types/spanfieldmaskingquery.go b/typedapi/types/spanfieldmaskingquery.go index 5b8f1ecb15..34ad80da82 100644 --- a/typedapi/types/spanfieldmaskingquery.go +++ b/typedapi/types/spanfieldmaskingquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,17 +31,17 @@ import ( // SpanFieldMaskingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L41-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L41-L47 type SpanFieldMaskingQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. // A boost value between 0 and 1.0 decreases the relevance score. // A value greater than 1.0 increases the relevance score. 
- Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Query *SpanQuery `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Boost *float32 `json:"boost,omitempty"` + Field string `json:"field"` + Query SpanQuery `json:"query"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanFieldMaskingQuery) UnmarshalJSON(data []byte) error { @@ -108,3 +108,13 @@ func NewSpanFieldMaskingQuery() *SpanFieldMaskingQuery { return r } + +// true + +type SpanFieldMaskingQueryVariant interface { + SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery +} + +func (s *SpanFieldMaskingQuery) SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery { + return s +} diff --git a/typedapi/types/spanfirstquery.go b/typedapi/types/spanfirstquery.go index 15606762d8..e5ed60b39f 100644 --- a/typedapi/types/spanfirstquery.go +++ b/typedapi/types/spanfirstquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanFirstQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L49-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L49-L61 type SpanFirstQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -42,8 +42,8 @@ type SpanFirstQuery struct { // End Controls the maximum end position permitted in a match. End int `json:"end"` // Match Can be any other span type query. 
- Match *SpanQuery `json:"match,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Match SpanQuery `json:"match"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanFirstQuery) UnmarshalJSON(data []byte) error { @@ -121,3 +121,13 @@ func NewSpanFirstQuery() *SpanFirstQuery { return r } + +// true + +type SpanFirstQueryVariant interface { + SpanFirstQueryCaster() *SpanFirstQuery +} + +func (s *SpanFirstQuery) SpanFirstQueryCaster() *SpanFirstQuery { + return s +} diff --git a/typedapi/types/spangapquery.go b/typedapi/types/spangapquery.go index 6096fc6495..6036b99378 100644 --- a/typedapi/types/spangapquery.go +++ b/typedapi/types/spangapquery.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SpanGapQuery type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L63-L65 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L63-L65 type SpanGapQuery map[string]int + +type SpanGapQueryVariant interface { + SpanGapQueryCaster() *SpanGapQuery +} diff --git a/typedapi/types/spanishanalyzer.go b/typedapi/types/spanishanalyzer.go index d34724a3e1..59ede2543f 100644 --- a/typedapi/types/spanishanalyzer.go +++ b/typedapi/types/spanishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanishAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L285-L290 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L296-L301 type SpanishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewSpanishAnalyzer() *SpanishAnalyzer { return r } + +// true + +type SpanishAnalyzerVariant interface { + SpanishAnalyzerCaster() *SpanishAnalyzer +} + +func (s *SpanishAnalyzer) SpanishAnalyzerCaster() *SpanishAnalyzer { + return s +} diff --git a/typedapi/types/spanmultitermquery.go b/typedapi/types/spanmultitermquery.go index 8d15ffaaba..00dd32e838 100644 --- a/typedapi/types/spanmultitermquery.go +++ b/typedapi/types/spanmultitermquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanMultiTermQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L67-L75 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L67-L75 type SpanMultiTermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -41,7 +41,7 @@ type SpanMultiTermQuery struct { Boost *float32 `json:"boost,omitempty"` // Match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, // or `regexp` query). - Match *Query `json:"match,omitempty"` + Match Query `json:"match"` QueryName_ *string `json:"_name,omitempty"` } @@ -104,3 +104,13 @@ func NewSpanMultiTermQuery() *SpanMultiTermQuery { return r } + +// true + +type SpanMultiTermQueryVariant interface { + SpanMultiTermQueryCaster() *SpanMultiTermQuery +} + +func (s *SpanMultiTermQuery) SpanMultiTermQueryCaster() *SpanMultiTermQuery { + return s +} diff --git a/typedapi/types/spannearquery.go b/typedapi/types/spannearquery.go index c610a6114b..916bffb1a6 100644 --- a/typedapi/types/spannearquery.go +++ b/typedapi/types/spannearquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanNearQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L77-L93 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L77-L93 type SpanNearQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -137,3 +137,13 @@ func NewSpanNearQuery() *SpanNearQuery { return r } + +// true + +type SpanNearQueryVariant interface { + SpanNearQueryCaster() *SpanNearQuery +} + +func (s *SpanNearQuery) SpanNearQueryCaster() *SpanNearQuery { + return s +} diff --git a/typedapi/types/spannotquery.go b/typedapi/types/spannotquery.go index 43eadf3f1b..570173d687 100644 --- a/typedapi/types/spannotquery.go +++ b/typedapi/types/spannotquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanNotQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L95-L122 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L95-L122 type SpanNotQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -44,9 +44,9 @@ type SpanNotQuery struct { // Equivalent to setting both `pre` and `post`. Dist *int `json:"dist,omitempty"` // Exclude Span query whose matches must not overlap those returned. 
- Exclude *SpanQuery `json:"exclude,omitempty"` + Exclude SpanQuery `json:"exclude"` // Include Span query whose matches are filtered. - Include *SpanQuery `json:"include,omitempty"` + Include SpanQuery `json:"include"` // Post The number of tokens after the include span that can’t have overlap with the // exclude span. Post *int `json:"post,omitempty"` @@ -168,3 +168,13 @@ func NewSpanNotQuery() *SpanNotQuery { return r } + +// true + +type SpanNotQueryVariant interface { + SpanNotQueryCaster() *SpanNotQuery +} + +func (s *SpanNotQuery) SpanNotQueryCaster() *SpanNotQuery { + return s +} diff --git a/typedapi/types/spanorquery.go b/typedapi/types/spanorquery.go index f355334658..498c7258cb 100644 --- a/typedapi/types/spanorquery.go +++ b/typedapi/types/spanorquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SpanOrQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L124-L132 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L124-L132 type SpanOrQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -103,3 +103,13 @@ func NewSpanOrQuery() *SpanOrQuery { return r } + +// true + +type SpanOrQueryVariant interface { + SpanOrQueryCaster() *SpanOrQuery +} + +func (s *SpanOrQuery) SpanOrQueryCaster() *SpanOrQuery { + return s +} diff --git a/typedapi/types/spanquery.go b/typedapi/types/spanquery.go index b015e73c90..15d072d712 100644 --- a/typedapi/types/spanquery.go +++ b/typedapi/types/spanquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // SpanQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L158-L200 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L159-L201 type SpanQuery struct { + AdditionalSpanQueryProperty map[string]json.RawMessage `json:"-"` // SpanContaining Accepts a list of span queries, but only returns those spans which also match // a second span query. 
SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` @@ -126,16 +127,69 @@ func (s *SpanQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "SpanWithin", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalSpanQueryProperty == nil { + s.AdditionalSpanQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalSpanQueryProperty", err) + } + s.AdditionalSpanQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s SpanQuery) MarshalJSON() ([]byte, error) { + type opt SpanQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSpanQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSpanQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSpanQuery returns a SpanQuery. func NewSpanQuery() *SpanQuery { r := &SpanQuery{ - SpanTerm: make(map[string]SpanTermQuery, 0), + AdditionalSpanQueryProperty: make(map[string]json.RawMessage), + SpanTerm: make(map[string]SpanTermQuery), } return r } + +// true + +type SpanQueryVariant interface { + SpanQueryCaster() *SpanQuery +} + +func (s *SpanQuery) SpanQueryCaster() *SpanQuery { + return s +} diff --git a/typedapi/types/spantermquery.go b/typedapi/types/spantermquery.go index 9646bbee79..cce8839886 100644 --- a/typedapi/types/spantermquery.go +++ b/typedapi/types/spantermquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,30 +31,23 @@ import ( // SpanTermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L134-L140 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L134-L141 type SpanTermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. // A boost value between 0 and 1.0 decreases the relevance score. // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Value string `json:"value"` + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Value FieldValue `json:"value"` } func (s *SpanTermQuery) UnmarshalJSON(data []byte) error { if !bytes.HasPrefix(data, []byte(`{`)) { - if !bytes.HasPrefix(data, []byte(`"`)) { - data = append([]byte{'"'}, data...) - data = append(data, []byte{'"'}...) 
- } err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) - if err != nil { - return err - } - return nil + return err } dec := json.NewDecoder(bytes.NewReader(data)) @@ -98,17 +91,10 @@ func (s *SpanTermQuery) UnmarshalJSON(data []byte) error { } s.QueryName_ = &o - case "value": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + case "value", "term": + if err := dec.Decode(&s.Value); err != nil { return fmt.Errorf("%s | %w", "Value", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Value = o } } @@ -121,3 +107,13 @@ func NewSpanTermQuery() *SpanTermQuery { return r } + +// true + +type SpanTermQueryVariant interface { + SpanTermQueryCaster() *SpanTermQuery +} + +func (s *SpanTermQuery) SpanTermQueryCaster() *SpanTermQuery { + return s +} diff --git a/typedapi/types/spanwithinquery.go b/typedapi/types/spanwithinquery.go index 6b628bde59..a56b70a4e8 100644 --- a/typedapi/types/spanwithinquery.go +++ b/typedapi/types/spanwithinquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,11 @@ import ( // SpanWithinQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/span.ts#L142-L156 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/span.ts#L143-L157 type SpanWithinQuery struct { // Big Can be any span query. // Matching spans from `little` that are enclosed within `big` are returned. 
- Big *SpanQuery `json:"big,omitempty"` + Big SpanQuery `json:"big"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -44,8 +44,8 @@ type SpanWithinQuery struct { Boost *float32 `json:"boost,omitempty"` // Little Can be any span query. // Matching spans from `little` that are enclosed within `big` are returned. - Little *SpanQuery `json:"little,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanWithinQuery) UnmarshalJSON(data []byte) error { @@ -112,3 +112,13 @@ func NewSpanWithinQuery() *SpanWithinQuery { return r } + +// true + +type SpanWithinQueryVariant interface { + SpanWithinQueryCaster() *SpanWithinQuery +} + +func (s *SpanWithinQuery) SpanWithinQueryCaster() *SpanWithinQuery { + return s +} diff --git a/typedapi/types/sparseembeddingresult.go b/typedapi/types/sparseembeddingresult.go index d3b3db33e2..935c5b5ff9 100644 --- a/typedapi/types/sparseembeddingresult.go +++ b/typedapi/types/sparseembeddingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SparseEmbeddingResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L36-L38 type SparseEmbeddingResult struct { Embedding SparseVector `json:"embedding"` } @@ -66,3 +66,5 @@ func NewSparseEmbeddingResult() *SparseEmbeddingResult { return r } + +// false diff --git a/typedapi/types/sparsevector.go b/typedapi/types/sparsevector.go index de3a7b9056..5c4ae37ec0 100644 --- a/typedapi/types/sparsevector.go +++ b/typedapi/types/sparsevector.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SparseVector type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L24-L28 type SparseVector map[string]float32 diff --git a/typedapi/types/sparsevectorproperty.go b/typedapi/types/sparsevectorproperty.go index c57aa9b994..9667c80efe 100644 --- a/typedapi/types/sparsevectorproperty.go +++ b/typedapi/types/sparsevectorproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,19 +29,21 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // SparseVectorProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L202-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L206-L208 type SparseVectorProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { @@ -83,301 +85,313 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -426,306 +440,323 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties 
| %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -740,12 +771,13 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { func (s SparseVectorProperty) MarshalJSON() ([]byte, error) { type innerSparseVectorProperty SparseVectorProperty tmp := innerSparseVectorProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "sparse_vector" @@ -756,10 +788,20 @@ func (s SparseVectorProperty) MarshalJSON() ([]byte, error) { // NewSparseVectorProperty returns a SparseVectorProperty. 
func NewSparseVectorProperty() *SparseVectorProperty { r := &SparseVectorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type SparseVectorPropertyVariant interface { + SparseVectorPropertyCaster() *SparseVectorProperty +} + +func (s *SparseVectorProperty) SparseVectorPropertyCaster() *SparseVectorProperty { + return s +} diff --git a/typedapi/types/sparsevectorquery.go b/typedapi/types/sparsevectorquery.go index 70800cada3..9ae624ac76 100644 --- a/typedapi/types/sparsevectorquery.go +++ b/typedapi/types/sparsevectorquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,8 +31,9 @@ import ( // SparseVectorQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/SparseVectorQuery.ts#L26-L80 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/SparseVectorQuery.ts#L26-L80 type SparseVectorQuery struct { + AdditionalSparseVectorQueryProperty map[string]json.RawMessage `json:"-"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. 
@@ -164,16 +165,69 @@ func (s *SparseVectorQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVector", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalSparseVectorQueryProperty == nil { + s.AdditionalSparseVectorQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalSparseVectorQueryProperty", err) + } + s.AdditionalSparseVectorQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s SparseVectorQuery) MarshalJSON() ([]byte, error) { + type opt SparseVectorQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSparseVectorQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSparseVectorQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSparseVectorQuery returns a SparseVectorQuery. 
func NewSparseVectorQuery() *SparseVectorQuery { r := &SparseVectorQuery{ - QueryVector: make(map[string]float32, 0), + AdditionalSparseVectorQueryProperty: make(map[string]json.RawMessage), + QueryVector: make(map[string]float32), } return r } + +// true + +type SparseVectorQueryVariant interface { + SparseVectorQueryCaster() *SparseVectorQuery +} + +func (s *SparseVectorQuery) SparseVectorQueryCaster() *SparseVectorQuery { + return s +} diff --git a/typedapi/types/splitprocessor.go b/typedapi/types/splitprocessor.go index 73879faaf7..ba31f330f1 100644 --- a/typedapi/types/splitprocessor.go +++ b/typedapi/types/splitprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SplitProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1456-L1481 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1497-L1522 type SplitProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type SplitProcessor struct { // Field The field to split. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -92,16 +92,9 @@ func (s *SplitProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -190,3 +183,13 @@ func NewSplitProcessor() *SplitProcessor { return r } + +// true + +type SplitProcessorVariant interface { + SplitProcessorCaster() *SplitProcessor +} + +func (s *SplitProcessor) SplitProcessorCaster() *SplitProcessor { + return s +} diff --git a/typedapi/types/sql.go b/typedapi/types/sql.go index 1bd18a2a63..b23673f0e3 100644 --- a/typedapi/types/sql.go +++ b/typedapi/types/sql.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Sql type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L384-L387 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L392-L395 type Sql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -106,9 +106,11 @@ func (s *Sql) UnmarshalJSON(data []byte) error { // NewSql returns a Sql. 
func NewSql() *Sql { r := &Sql{ - Features: make(map[string]int, 0), - Queries: make(map[string]XpackQuery, 0), + Features: make(map[string]int), + Queries: make(map[string]XpackQuery), } return r } + +// false diff --git a/typedapi/types/ssl.go b/typedapi/types/ssl.go index d4f953facd..7ad4142d37 100644 --- a/typedapi/types/ssl.go +++ b/typedapi/types/ssl.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Ssl type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L389-L392 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L397-L400 type Ssl struct { Http FeatureToggle `json:"http"` Transport FeatureToggle `json:"transport"` @@ -34,3 +34,5 @@ func NewSsl() *Ssl { return r } + +// false diff --git a/typedapi/types/stagnatingbackingindices.go b/typedapi/types/stagnatingbackingindices.go index ad1623f6d8..b251dae691 100644 --- a/typedapi/types/stagnatingbackingindices.go +++ b/typedapi/types/stagnatingbackingindices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StagnatingBackingIndices type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/health_report/types.ts#L157-L161 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/health_report/types.ts#L158-L162 type StagnatingBackingIndices struct { FirstOccurrenceTimestamp int64 `json:"first_occurrence_timestamp"` IndexName string `json:"index_name"` @@ -100,3 +100,5 @@ func NewStagnatingBackingIndices() *StagnatingBackingIndices { return r } + +// false diff --git a/typedapi/types/standardanalyzer.go b/typedapi/types/standardanalyzer.go index 89940714c6..60c6eb22ae 100644 --- a/typedapi/types/standardanalyzer.go +++ b/typedapi/types/standardanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,19 @@ import ( // StandardAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L341-L345 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L382-L402 type StandardAnalyzer struct { - MaxTokenLength *int `json:"max_token_length,omitempty"` - Stopwords []string `json:"stopwords,omitempty"` - Type string `json:"type,omitempty"` + // MaxTokenLength The maximum token length. If a token is seen that exceeds this length then it + // is split at `max_token_length` intervals. + // Defaults to `255`. + MaxTokenLength *int `json:"max_token_length,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. 
+ // Defaults to `_none_`. + Stopwords []string `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` } func (s *StandardAnalyzer) UnmarshalJSON(data []byte) error { @@ -85,6 +93,18 @@ func (s *StandardAnalyzer) UnmarshalJSON(data []byte) error { } } + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -101,6 +121,7 @@ func (s StandardAnalyzer) MarshalJSON() ([]byte, error) { tmp := innerStandardAnalyzer{ MaxTokenLength: s.MaxTokenLength, Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, Type: s.Type, } @@ -115,3 +136,13 @@ func NewStandardAnalyzer() *StandardAnalyzer { return r } + +// true + +type StandardAnalyzerVariant interface { + StandardAnalyzerCaster() *StandardAnalyzer +} + +func (s *StandardAnalyzer) StandardAnalyzerCaster() *StandardAnalyzer { + return s +} diff --git a/typedapi/types/standarddeviationbounds.go b/typedapi/types/standarddeviationbounds.go index a53dfc333f..55aacd9f4b 100644 --- a/typedapi/types/standarddeviationbounds.go +++ b/typedapi/types/standarddeviationbounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // StandardDeviationBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L281-L288 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L281-L288 type StandardDeviationBounds struct { Lower *Float64 `json:"lower,omitempty"` LowerPopulation *Float64 `json:"lower_population,omitempty"` @@ -96,3 +96,5 @@ func NewStandardDeviationBounds() *StandardDeviationBounds { return r } + +// false diff --git a/typedapi/types/standarddeviationboundsasstring.go b/typedapi/types/standarddeviationboundsasstring.go index 43466517f2..e3356e61b0 100644 --- a/typedapi/types/standarddeviationboundsasstring.go +++ b/typedapi/types/standarddeviationboundsasstring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StandardDeviationBoundsAsString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L290-L297 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L290-L297 type StandardDeviationBoundsAsString struct { Lower string `json:"lower"` LowerPopulation string `json:"lower_population"` @@ -139,3 +139,5 @@ func NewStandardDeviationBoundsAsString() *StandardDeviationBoundsAsString { return r } + +// false diff --git a/typedapi/types/standardretriever.go b/typedapi/types/standardretriever.go index 41eb23edb4..5e66b18d13 100644 --- a/typedapi/types/standardretriever.go +++ b/typedapi/types/standardretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StandardRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L51-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L51-L62 type StandardRetriever struct { // Collapse Collapses the top documents by a specified key into a single top document per // key. 
@@ -156,3 +156,13 @@ func NewStandardRetriever() *StandardRetriever { return r } + +// true + +type StandardRetrieverVariant interface { + StandardRetrieverCaster() *StandardRetriever +} + +func (s *StandardRetriever) StandardRetrieverCaster() *StandardRetriever { + return s +} diff --git a/typedapi/types/standardtokenizer.go b/typedapi/types/standardtokenizer.go index a7a3c6eaaa..fea1beedd5 100644 --- a/typedapi/types/standardtokenizer.go +++ b/typedapi/types/standardtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StandardTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L121-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L121-L124 type StandardTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewStandardTokenizer() *StandardTokenizer { return r } + +// true + +type StandardTokenizerVariant interface { + StandardTokenizerCaster() *StandardTokenizer +} + +func (s *StandardTokenizer) StandardTokenizerCaster() *StandardTokenizer { + return s +} diff --git a/typedapi/types/statistics.go b/typedapi/types/statistics.go index 8c972a8d0a..e4e5385f64 100644 --- a/typedapi/types/statistics.go +++ b/typedapi/types/statistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Statistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/slm/_types/SnapshotLifecycle.ts#L51-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/slm/_types/SnapshotLifecycle.ts#L61-L84 type Statistics struct { Policy *string `json:"policy,omitempty"` RetentionDeletionTime Duration `json:"retention_deletion_time,omitempty"` @@ -191,3 +191,5 @@ func NewStatistics() *Statistics { return r } + +// false diff --git a/typedapi/types/stats.go b/typedapi/types/stats.go index ac47ce61f4..68233c8494 100644 --- a/typedapi/types/stats.go +++ b/typedapi/types/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Stats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L30-L114 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L30-L114 type Stats struct { // AdaptiveSelection Statistics about adaptive replica selection. AdaptiveSelection map[string]AdaptiveSelection `json:"adaptive_selection,omitempty"` @@ -270,12 +270,14 @@ func (s *Stats) UnmarshalJSON(data []byte) error { // NewStats returns a Stats. 
func NewStats() *Stats { r := &Stats{ - AdaptiveSelection: make(map[string]AdaptiveSelection, 0), - Attributes: make(map[string]string, 0), - Breakers: make(map[string]Breaker, 0), - ScriptCache: make(map[string][]ScriptCache, 0), - ThreadPool: make(map[string]ThreadCount, 0), + AdaptiveSelection: make(map[string]AdaptiveSelection), + Attributes: make(map[string]string), + Breakers: make(map[string]Breaker), + ScriptCache: make(map[string][]ScriptCache), + ThreadPool: make(map[string]ThreadCount), } return r } + +// false diff --git a/typedapi/types/statsaggregate.go b/typedapi/types/statsaggregate.go index 1000edd14f..1df737cef4 100644 --- a/typedapi/types/statsaggregate.go +++ b/typedapi/types/statsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L257-L273 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L257-L273 type StatsAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -170,3 +170,5 @@ func NewStatsAggregate() *StatsAggregate { return r } + +// false diff --git a/typedapi/types/statsaggregation.go b/typedapi/types/statsaggregation.go index 093e9bddab..be43ff6984 100644 --- a/typedapi/types/statsaggregation.go +++ b/typedapi/types/statsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L291-L291 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L291-L291 type StatsAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewStatsAggregation() *StatsAggregation { return r } + +// true + +type StatsAggregationVariant interface { + StatsAggregationCaster() *StatsAggregation +} + +func (s *StatsAggregation) StatsAggregationCaster() *StatsAggregation { + return s +} diff --git a/typedapi/types/statsbucketaggregate.go b/typedapi/types/statsbucketaggregate.go index 95135d1a19..917a2858fc 100644 --- a/typedapi/types/statsbucketaggregate.go +++ b/typedapi/types/statsbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StatsBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L275-L279 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L275-L279 type StatsBucketAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -170,3 +170,5 @@ func NewStatsBucketAggregate() *StatsBucketAggregate { return r } + +// false diff --git a/typedapi/types/statsbucketaggregation.go b/typedapi/types/statsbucketaggregation.go index e4e742d57f..3c796c256d 100644 --- a/typedapi/types/statsbucketaggregation.go +++ b/typedapi/types/statsbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // StatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L410-L410 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L410-L410 type StatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewStatsBucketAggregation() *StatsBucketAggregation { return r } + +// true + +type StatsBucketAggregationVariant interface { + StatsBucketAggregationCaster() *StatsBucketAggregation +} + +func (s *StatsBucketAggregation) StatsBucketAggregationCaster() *StatsBucketAggregation { + return s +} diff --git a/typedapi/types/status.go b/typedapi/types/status.go index 44863673bd..5f167bbe17 100644 --- a/typedapi/types/status.go +++ b/typedapi/types/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,16 +31,28 @@ import ( // Status type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/snapshot/_types/SnapshotStatus.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/_types/SnapshotStatus.ts#L26-L60 type Status struct { + // IncludeGlobalState Indicates whether the current cluster state is included in the snapshot. IncludeGlobalState bool `json:"include_global_state"` Indices map[string]SnapshotIndexStats `json:"indices"` - Repository string `json:"repository"` - ShardsStats SnapshotShardsStats `json:"shards_stats"` - Snapshot string `json:"snapshot"` - State string `json:"state"` - Stats SnapshotStats `json:"stats"` - Uuid string `json:"uuid"` + // Repository The name of the repository that includes the snapshot. + Repository string `json:"repository"` + // ShardsStats Statistics for the shards in the snapshot. + ShardsStats SnapshotShardsStats `json:"shards_stats"` + // Snapshot The name of the snapshot. 
+ Snapshot string `json:"snapshot"` + // State The current snapshot state: + // + // * `FAILED`: The snapshot finished with an error and failed to store any data. + // * `STARTED`: The snapshot is currently running. + // * `SUCCESS`: The snapshot completed. + State string `json:"state"` + // Stats Details about the number (`file_count`) and size (`size_in_bytes`) of files + // included in the snapshot. + Stats SnapshotStats `json:"stats"` + // Uuid The universally unique identifier (UUID) for the snapshot. + Uuid string `json:"uuid"` } func (s *Status) UnmarshalJSON(data []byte) error { @@ -139,8 +151,10 @@ func (s *Status) UnmarshalJSON(data []byte) error { // NewStatus returns a Status. func NewStatus() *Status { r := &Status{ - Indices: make(map[string]SnapshotIndexStats, 0), + Indices: make(map[string]SnapshotIndexStats), } return r } + +// false diff --git a/typedapi/types/forcemergeresponsebody.go b/typedapi/types/statuserror.go similarity index 57% rename from typedapi/types/forcemergeresponsebody.go rename to typedapi/types/statuserror.go index 8ae1fb18e3..f54a6f5e9b 100644 --- a/typedapi/types/forcemergeresponsebody.go +++ b/typedapi/types/statuserror.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,17 +29,15 @@ import ( "strconv" ) -// ForceMergeResponseBody type. +// StatusError type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/forcemerge/_types/response.ts#L22-L28 -type ForceMergeResponseBody struct { - Shards_ *ShardStatistics `json:"_shards,omitempty"` - // Task task contains a task id returned when wait_for_completion=false, - // you can use the task_id to get the status of the task at _tasks/ - Task *string `json:"task,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L44-L47 +type StatusError struct { + Index string `json:"index"` + Message string `json:"message"` } -func (s *ForceMergeResponseBody) UnmarshalJSON(data []byte) error { +func (s *StatusError) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -54,31 +52,40 @@ func (s *ForceMergeResponseBody) UnmarshalJSON(data []byte) error { switch t { - case "_shards": - if err := dec.Decode(&s.Shards_); err != nil { - return fmt.Errorf("%s | %w", "Shards_", err) + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) } + s.Index = o - case "task": + case "message": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Task", err) + return fmt.Errorf("%s | %w", "Message", err) } o := string(tmp[:]) o, err = strconv.Unquote(o) if err != nil { o = string(tmp[:]) } - s.Task = &o + s.Message = o } } return nil } -// NewForceMergeResponseBody returns a ForceMergeResponseBody. -func NewForceMergeResponseBody() *ForceMergeResponseBody { - r := &ForceMergeResponseBody{} +// NewStatusError returns a StatusError. 
+func NewStatusError() *StatusError { + r := &StatusError{} return r } + +// false diff --git a/typedapi/types/frozenindices.go b/typedapi/types/statusinprogress.go similarity index 52% rename from typedapi/types/frozenindices.go rename to typedapi/types/statusinprogress.go index 5aa6428097..e1ccbae8d5 100644 --- a/typedapi/types/frozenindices.go +++ b/typedapi/types/statusinprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,16 +29,16 @@ import ( "strconv" ) -// FrozenIndices type. +// StatusInProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L358-L360 -type FrozenIndices struct { - Available bool `json:"available"` - Enabled bool `json:"enabled"` - IndicesCount int64 `json:"indices_count"` +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L38-L42 +type StatusInProgress struct { + Index string `json:"index"` + ReindexedDocCount int64 `json:"reindexed_doc_count"` + TotalDocCount int64 `json:"total_doc_count"` } -func (s *FrozenIndices) UnmarshalJSON(data []byte) error { +func (s *StatusInProgress) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -53,47 +53,46 @@ func (s *FrozenIndices) UnmarshalJSON(data []byte) error { switch t { - case "available": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Available", err) - } - s.Available = value - case bool: - s.Available = v + case 
"index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o - case "enabled": + case "reindexed_doc_count": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.ParseBool(v) + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "Enabled", err) + return fmt.Errorf("%s | %w", "ReindexedDocCount", err) } - s.Enabled = value - case bool: - s.Enabled = v + s.ReindexedDocCount = value + case float64: + f := int64(v) + s.ReindexedDocCount = f } - case "indices_count": + case "total_doc_count": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "IndicesCount", err) + return fmt.Errorf("%s | %w", "TotalDocCount", err) } - s.IndicesCount = value + s.TotalDocCount = value case float64: f := int64(v) - s.IndicesCount = f + s.TotalDocCount = f } } @@ -101,9 +100,11 @@ func (s *FrozenIndices) UnmarshalJSON(data []byte) error { return nil } -// NewFrozenIndices returns a FrozenIndices. -func NewFrozenIndices() *FrozenIndices { - r := &FrozenIndices{} +// NewStatusInProgress returns a StatusInProgress. +func NewStatusInProgress() *StatusInProgress { + r := &StatusInProgress{} return r } + +// false diff --git a/typedapi/types/stemmeroverridetokenfilter.go b/typedapi/types/stemmeroverridetokenfilter.go index eacc5729fd..30c839b7fb 100644 --- a/typedapi/types/stemmeroverridetokenfilter.go +++ b/typedapi/types/stemmeroverridetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StemmerOverrideTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L315-L319 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L315-L319 type StemmerOverrideTokenFilter struct { Rules []string `json:"rules,omitempty"` RulesPath *string `json:"rules_path,omitempty"` @@ -107,3 +107,13 @@ func NewStemmerOverrideTokenFilter() *StemmerOverrideTokenFilter { return r } + +// true + +type StemmerOverrideTokenFilterVariant interface { + StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter +} + +func (s *StemmerOverrideTokenFilter) StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter { + return s +} diff --git a/typedapi/types/stemmertokenfilter.go b/typedapi/types/stemmertokenfilter.go index 6126f47b92..645b55c6b0 100644 --- a/typedapi/types/stemmertokenfilter.go +++ b/typedapi/types/stemmertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StemmerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L321-L325 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L321-L325 type StemmerTokenFilter struct { Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewStemmerTokenFilter() *StemmerTokenFilter { return r } + +// true + +type StemmerTokenFilterVariant interface { + StemmerTokenFilterCaster() *StemmerTokenFilter +} + +func (s *StemmerTokenFilter) StemmerTokenFilterCaster() *StemmerTokenFilter { + return s +} diff --git a/typedapi/types/stepkey.go b/typedapi/types/stepkey.go index 231ad541a7..c1135b20f9 100644 --- a/typedapi/types/stepkey.go +++ b/typedapi/types/stepkey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,13 @@ import ( // StepKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/move_to_step/types.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/move_to_step/types.ts#L20-L31 type StepKey struct { + // Action The optional action to which the index will be moved. Action *string `json:"action,omitempty"` - Name *string `json:"name,omitempty"` - Phase string `json:"phase"` + // Name The optional step name to which the index will be moved. 
+ Name *string `json:"name,omitempty"` + Phase string `json:"phase"` } func (s *StepKey) UnmarshalJSON(data []byte) error { @@ -100,3 +102,13 @@ func NewStepKey() *StepKey { return r } + +// true + +type StepKeyVariant interface { + StepKeyCaster() *StepKey +} + +func (s *StepKey) StepKeyCaster() *StepKey { + return s +} diff --git a/typedapi/types/stopanalyzer.go b/typedapi/types/stopanalyzer.go index c34041611c..a24c1d388c 100644 --- a/typedapi/types/stopanalyzer.go +++ b/typedapi/types/stopanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,16 @@ import ( // StopAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L347-L352 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L404-L419 type StopAnalyzer struct { - Stopwords []string `json:"stopwords,omitempty"` - StopwordsPath *string `json:"stopwords_path,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. + Stopwords []string `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. 
+ StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` } func (s *StopAnalyzer) UnmarshalJSON(data []byte) error { @@ -118,3 +122,13 @@ func NewStopAnalyzer() *StopAnalyzer { return r } + +// true + +type StopAnalyzerVariant interface { + StopAnalyzerCaster() *StopAnalyzer +} + +func (s *StopAnalyzer) StopAnalyzerCaster() *StopAnalyzer { + return s +} diff --git a/typedapi/types/stoptokenfilter.go b/typedapi/types/stoptokenfilter.go index 462e095c7f..4212bdac67 100644 --- a/typedapi/types/stoptokenfilter.go +++ b/typedapi/types/stoptokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StopTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L96-L102 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L96-L102 type StopTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` RemoveTrailing *bool `json:"remove_trailing,omitempty"` @@ -150,3 +150,13 @@ func NewStopTokenFilter() *StopTokenFilter { return r } + +// true + +type StopTokenFilterVariant interface { + StopTokenFilterCaster() *StopTokenFilter +} + +func (s *StopTokenFilter) StopTokenFilterCaster() *StopTokenFilter { + return s +} diff --git a/typedapi/types/stopwords.go b/typedapi/types/stopwords.go index 95ca2ef4b8..0314451ed4 100644 --- a/typedapi/types/stopwords.go +++ b/typedapi/types/stopwords.go @@ -16,11 +16,15 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // StopWords type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/StopWords.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/StopWords.ts#L20-L26 type StopWords []string + +type StopWordsVariant interface { + StopWordsCaster() *StopWords +} diff --git a/typedapi/types/storage.go b/typedapi/types/storage.go index c2caebffd1..228e8261c3 100644 --- a/typedapi/types/storage.go +++ b/typedapi/types/storage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // Storage type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L509-L518 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L534-L543 type Storage struct { // AllowMmap You can restrict the use of the mmapfs and the related hybridfs store type // via the setting node.store.allow_mmap. 
@@ -91,3 +91,13 @@ func NewStorage() *Storage { return r } + +// true + +type StorageVariant interface { + StorageCaster() *Storage +} + +func (s *Storage) StorageCaster() *Storage { + return s +} diff --git a/typedapi/types/storedscript.go b/typedapi/types/storedscript.go index 831c7a7103..b236e84c0c 100644 --- a/typedapi/types/storedscript.go +++ b/typedapi/types/storedscript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,12 +33,14 @@ import ( // StoredScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Scripting.ts#L47-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Scripting.ts#L47-L59 type StoredScript struct { - // Lang Specifies the language the script is written in. + // Lang The language the script is written in. + // For search templates, use `mustache`. Lang scriptlanguage.ScriptLanguage `json:"lang"` Options map[string]string `json:"options,omitempty"` // Source The script source. + // For search templates, an object containing the search template. Source string `json:"source"` } @@ -90,8 +92,18 @@ func (s *StoredScript) UnmarshalJSON(data []byte) error { // NewStoredScript returns a StoredScript. 
func NewStoredScript() *StoredScript { r := &StoredScript{ - Options: make(map[string]string, 0), + Options: make(map[string]string), } return r } + +// true + +type StoredScriptVariant interface { + StoredScriptCaster() *StoredScript +} + +func (s *StoredScript) StoredScriptCaster() *StoredScript { + return s +} diff --git a/typedapi/types/storestats.go b/typedapi/types/storestats.go index a7028cb5c0..fffc77e18f 100644 --- a/typedapi/types/storestats.go +++ b/typedapi/types/storestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StoreStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L368-L395 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L371-L398 type StoreStats struct { // Reserved A prediction of how much larger the shard stores will eventually grow due to // ongoing peer recoveries, restoring snapshots, and similar activities. @@ -140,3 +140,5 @@ func NewStoreStats() *StoreStats { return r } + +// false diff --git a/typedapi/types/streamresult.go b/typedapi/types/streamresult.go new file mode 100644 index 0000000000..da5b453fa6 --- /dev/null +++ b/typedapi/types/streamresult.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// StreamResult type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Binary.ts#L27-L27 +type StreamResult []byte diff --git a/typedapi/types/stringifiedboolean.go b/typedapi/types/stringifiedboolean.go index 1b9a9f6e72..2938c73bde 100644 --- a/typedapi/types/stringifiedboolean.go +++ b/typedapi/types/stringifiedboolean.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // bool // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type Stringifiedboolean any + +type StringifiedbooleanVariant interface { + StringifiedbooleanCaster() *Stringifiedboolean +} diff --git a/typedapi/types/stringifieddouble.go b/typedapi/types/stringifieddouble.go index 11c1afeca0..30caf1051c 100644 --- a/typedapi/types/stringifieddouble.go +++ b/typedapi/types/stringifieddouble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // Float64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type Stringifieddouble any diff --git a/typedapi/types/stringifiedepochtimeunitmillis.go b/typedapi/types/stringifiedepochtimeunitmillis.go index a1e4212b06..43f633f839 100644 --- a/typedapi/types/stringifiedepochtimeunitmillis.go +++ b/typedapi/types/stringifiedepochtimeunitmillis.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitMillis any + +type StringifiedEpochTimeUnitMillisVariant interface { + StringifiedEpochTimeUnitMillisCaster() *StringifiedEpochTimeUnitMillis +} diff --git a/typedapi/types/stringifiedepochtimeunitseconds.go b/typedapi/types/stringifiedepochtimeunitseconds.go index a0ec07e12d..84684dcd81 100644 --- a/typedapi/types/stringifiedepochtimeunitseconds.go +++ b/typedapi/types/stringifiedepochtimeunitseconds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitSeconds any diff --git a/typedapi/types/stringifiedinteger.go b/typedapi/types/stringifiedinteger.go index 827941ca22..9ff289e787 100644 --- a/typedapi/types/stringifiedinteger.go +++ b/typedapi/types/stringifiedinteger.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type Stringifiedinteger any + +type StringifiedintegerVariant interface { + StringifiedintegerCaster() *Stringifiedinteger +} diff --git a/typedapi/types/stringifiedversionnumber.go b/typedapi/types/stringifiedversionnumber.go index ee47eca1cf..0a244290ea 100644 --- a/typedapi/types/stringifiedversionnumber.go +++ b/typedapi/types/stringifiedversionnumber.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedVersionNumber any diff --git a/typedapi/types/stringraretermsaggregate.go b/typedapi/types/stringraretermsaggregate.go index 7e02cbb3a2..bbf4fbf788 100644 --- a/typedapi/types/stringraretermsaggregate.go +++ b/typedapi/types/stringraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // StringRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L483-L487 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L483-L487 type StringRareTermsAggregate struct { Buckets BucketsStringRareTermsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewStringRareTermsAggregate() *StringRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/stringraretermsbucket.go b/typedapi/types/stringraretermsbucket.go index 3bdbefc1b3..43862c770b 100644 --- a/typedapi/types/stringraretermsbucket.go +++ b/typedapi/types/stringraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // StringRareTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L489-L491 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L489-L491 type StringRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -638,8 +638,10 @@ func (s StringRareTermsBucket) MarshalJSON() ([]byte, error) { // NewStringRareTermsBucket returns a StringRareTermsBucket. 
func NewStringRareTermsBucket() *StringRareTermsBucket { r := &StringRareTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/stringstatsaggregate.go b/typedapi/types/stringstatsaggregate.go index 39edfc670f..bcdacaa699 100644 --- a/typedapi/types/stringstatsaggregate.go +++ b/typedapi/types/stringstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StringStatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L793-L804 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L793-L804 type StringStatsAggregate struct { AvgLength *Float64 `json:"avg_length,omitempty"` AvgLengthAsString *string `json:"avg_length_as_string,omitempty"` @@ -152,3 +152,5 @@ func NewStringStatsAggregate() *StringStatsAggregate { return r } + +// false diff --git a/typedapi/types/stringstatsaggregation.go b/typedapi/types/stringstatsaggregation.go index dfd788f32f..fbbddc91fe 100644 --- a/typedapi/types/stringstatsaggregation.go +++ b/typedapi/types/stringstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StringStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L293-L299 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L293-L299 type StringStatsAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -98,3 +98,13 @@ func NewStringStatsAggregation() *StringStatsAggregation { return r } + +// true + +type StringStatsAggregationVariant interface { + StringStatsAggregationCaster() *StringStatsAggregation +} + +func (s *StringStatsAggregation) StringStatsAggregationCaster() *StringStatsAggregation { + return s +} diff --git a/typedapi/types/stringtermsaggregate.go b/typedapi/types/stringtermsaggregate.go index e3ba88e9ab..94baf9a7e7 100644 --- a/typedapi/types/stringtermsaggregate.go +++ b/typedapi/types/stringtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StringTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L424-L429 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L424-L429 type StringTermsAggregate struct { Buckets BucketsStringTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewStringTermsAggregate() *StringTermsAggregate { return r } + +// false diff --git a/typedapi/types/stringtermsbucket.go b/typedapi/types/stringtermsbucket.go index 24fc2903af..f85643abfa 100644 --- a/typedapi/types/stringtermsbucket.go +++ b/typedapi/types/stringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // StringTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L435-L437 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L435-L437 type StringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -647,8 +647,10 @@ func (s StringTermsBucket) MarshalJSON() ([]byte, error) { // NewStringTermsBucket returns a StringTermsBucket. 
func NewStringTermsBucket() *StringTermsBucket { r := &StringTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/stupidbackoffsmoothingmodel.go b/typedapi/types/stupidbackoffsmoothingmodel.go index bad4ae736f..6a4edd9301 100644 --- a/typedapi/types/stupidbackoffsmoothingmodel.go +++ b/typedapi/types/stupidbackoffsmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // StupidBackoffSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L463-L468 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L463-L468 type StupidBackoffSmoothingModel struct { // Discount A constant factor that the lower order n-gram model is discounted by. Discount Float64 `json:"discount"` @@ -79,3 +79,13 @@ func NewStupidBackoffSmoothingModel() *StupidBackoffSmoothingModel { return r } + +// true + +type StupidBackoffSmoothingModelVariant interface { + StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel +} + +func (s *StupidBackoffSmoothingModel) StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel { + return s +} diff --git a/typedapi/types/suggest.go b/typedapi/types/suggest.go index f011745ac3..0b567a3780 100644 --- a/typedapi/types/suggest.go +++ b/typedapi/types/suggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,5 @@ package types // PhraseSuggest // TermSuggest // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L34-L40 type Suggest any diff --git a/typedapi/types/suggestcontext.go b/typedapi/types/suggestcontext.go index 215efd8a2b..2d0d40715a 100644 --- a/typedapi/types/suggestcontext.go +++ b/typedapi/types/suggestcontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SuggestContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L43-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L43-L48 type SuggestContext struct { Name string `json:"name"` Path *string `json:"path,omitempty"` @@ -99,3 +99,13 @@ func NewSuggestContext() *SuggestContext { return r } + +// true + +type SuggestContextVariant interface { + SuggestContextCaster() *SuggestContext +} + +func (s *SuggestContext) SuggestContextCaster() *SuggestContext { + return s +} diff --git a/typedapi/types/suggester.go b/typedapi/types/suggester.go index 9ae3e2c3d3..e201540949 100644 --- a/typedapi/types/suggester.go +++ b/typedapi/types/suggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Suggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L101-L107 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L101-L107 type Suggester struct { Suggesters map[string]FieldSuggester `json:"-"` // Text Global suggest text, to avoid repetition when the same text is used in @@ -116,8 +116,18 @@ func (s Suggester) MarshalJSON() ([]byte, error) { // NewSuggester returns a Suggester. 
func NewSuggester() *Suggester { r := &Suggester{ - Suggesters: make(map[string]FieldSuggester, 0), + Suggesters: make(map[string]FieldSuggester), } return r } + +// true + +type SuggesterVariant interface { + SuggesterCaster() *Suggester +} + +func (s *Suggester) SuggesterCaster() *Suggester { + return s +} diff --git a/typedapi/types/suggestfuzziness.go b/typedapi/types/suggestfuzziness.go index 322f16fff1..63d7e479c4 100644 --- a/typedapi/types/suggestfuzziness.go +++ b/typedapi/types/suggestfuzziness.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SuggestFuzziness type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L196-L224 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L196-L224 type SuggestFuzziness struct { // Fuzziness The fuzziness factor. Fuzziness Fuzziness `json:"fuzziness,omitempty"` @@ -138,3 +138,13 @@ func NewSuggestFuzziness() *SuggestFuzziness { return r } + +// true + +type SuggestFuzzinessVariant interface { + SuggestFuzzinessCaster() *SuggestFuzziness +} + +func (s *SuggestFuzziness) SuggestFuzzinessCaster() *SuggestFuzziness { + return s +} diff --git a/typedapi/types/sumaggregate.go b/typedapi/types/sumaggregate.go index 741000b40b..d069660677 100644 --- a/typedapi/types/sumaggregate.go +++ b/typedapi/types/sumaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SumAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L211-L216 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L211-L216 type SumAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewSumAggregate() *SumAggregate { return r } + +// false diff --git a/typedapi/types/sumaggregation.go b/typedapi/types/sumaggregation.go index 656fae2f1a..3869a1bfa9 100644 --- a/typedapi/types/sumaggregation.go +++ b/typedapi/types/sumaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SumAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L301-L301 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L301-L301 type SumAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewSumAggregation() *SumAggregation { return r } + +// true + +type SumAggregationVariant interface { + SumAggregationCaster() *SumAggregation +} + +func (s *SumAggregation) SumAggregationCaster() *SumAggregation { + return s +} diff --git a/typedapi/types/sumbucketaggregation.go b/typedapi/types/sumbucketaggregation.go index 5f4d7f74d5..a8c719e823 100644 --- a/typedapi/types/sumbucketaggregation.go +++ b/typedapi/types/sumbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SumBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/pipeline.ts#L412-L415 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/pipeline.ts#L412-L415 type SumBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewSumBucketAggregation() *SumBucketAggregation { return r } + +// true + +type SumBucketAggregationVariant interface { + SumBucketAggregationCaster() *SumBucketAggregation +} + +func (s *SumBucketAggregation) SumBucketAggregationCaster() *SumBucketAggregation { + return s +} diff --git a/typedapi/types/summary.go b/typedapi/types/summary.go index 528e027cfc..a0e0fc3133 100644 --- a/typedapi/types/summary.go +++ b/typedapi/types/summary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // Summary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/enrich/_types/Policy.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/enrich/_types/Policy.ts#L24-L26 type Summary struct { Config map[policytype.PolicyType]EnrichPolicy `json:"config"` } @@ -34,8 +34,10 @@ type Summary struct { // NewSummary returns a Summary. func NewSummary() *Summary { r := &Summary{ - Config: make(map[policytype.PolicyType]EnrichPolicy, 0), + Config: make(map[policytype.PolicyType]EnrichPolicy), } return r } + +// false diff --git a/typedapi/types/summaryinfo.go b/typedapi/types/summaryinfo.go new file mode 100644 index 0000000000..9ad4ba9e47 --- /dev/null +++ b/typedapi/types/summaryinfo.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// SummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L193-L202 +type SummaryInfo struct { + // Read A collection of statistics that summarise the results of the read operations + // in the test. + Read ReadSummaryInfo `json:"read"` + // Write A collection of statistics that summarise the results of the write operations + // in the test. + Write WriteSummaryInfo `json:"write"` +} + +// NewSummaryInfo returns a SummaryInfo. +func NewSummaryInfo() *SummaryInfo { + r := &SummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/swedishanalyzer.go b/typedapi/types/swedishanalyzer.go index 35be069499..af1f1d5ee7 100644 --- a/typedapi/types/swedishanalyzer.go +++ b/typedapi/types/swedishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SwedishAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L292-L297 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L303-L308 type SwedishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewSwedishAnalyzer() *SwedishAnalyzer { return r } + +// true + +type SwedishAnalyzerVariant interface { + SwedishAnalyzerCaster() *SwedishAnalyzer +} + +func (s *SwedishAnalyzer) SwedishAnalyzerCaster() *SwedishAnalyzer { + return s +} diff --git a/typedapi/types/synccontainer.go b/typedapi/types/synccontainer.go index 920249365a..12686bc9b0 100644 --- a/typedapi/types/synccontainer.go +++ b/typedapi/types/synccontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // SyncContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L169-L175 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L169-L175 type SyncContainer struct { + AdditionalSyncContainerProperty map[string]json.RawMessage `json:"-"` // Time Specifies that the transform uses a time field to synchronize the source and // destination indices. 
Time *TimeSync `json:"time,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s SyncContainer) MarshalJSON() ([]byte, error) { + type opt SyncContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSyncContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSyncContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSyncContainer returns a SyncContainer. func NewSyncContainer() *SyncContainer { - r := &SyncContainer{} + r := &SyncContainer{ + AdditionalSyncContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type SyncContainerVariant interface { + SyncContainerCaster() *SyncContainer +} + +func (s *SyncContainer) SyncContainerCaster() *SyncContainer { + return s +} diff --git a/typedapi/types/syncjobconnectorreference.go b/typedapi/types/syncjobconnectorreference.go index 4dc225c464..85a61b1317 100644 --- a/typedapi/types/syncjobconnectorreference.go +++ b/typedapi/types/syncjobconnectorreference.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SyncJobConnectorReference type.
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/SyncJob.ts#L31-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/SyncJob.ts#L31-L40 type SyncJobConnectorReference struct { Configuration ConnectorConfiguration `json:"configuration"` Filtering FilteringRules `json:"filtering"` @@ -130,3 +130,5 @@ func NewSyncJobConnectorReference() *SyncJobConnectorReference { return r } + +// false diff --git a/typedapi/types/syncrulesfeature.go b/typedapi/types/syncrulesfeature.go index b612d5ca24..f1af4691a5 100644 --- a/typedapi/types/syncrulesfeature.go +++ b/typedapi/types/syncrulesfeature.go @@ -16,16 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // SyncRulesFeature type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L219-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L219-L228 type SyncRulesFeature struct { + // Advanced Indicates whether advanced sync rules are enabled. Advanced *FeatureEnabled `json:"advanced,omitempty"` - Basic *FeatureEnabled `json:"basic,omitempty"` + // Basic Indicates whether basic sync rules are enabled. + Basic *FeatureEnabled `json:"basic,omitempty"` } // NewSyncRulesFeature returns a SyncRulesFeature. 
@@ -34,3 +36,13 @@ func NewSyncRulesFeature() *SyncRulesFeature { return r } + +// true + +type SyncRulesFeatureVariant interface { + SyncRulesFeatureCaster() *SyncRulesFeature +} + +func (s *SyncRulesFeature) SyncRulesFeatureCaster() *SyncRulesFeature { + return s +} diff --git a/typedapi/types/synonymgraphtokenfilter.go b/typedapi/types/synonymgraphtokenfilter.go index 1102013e74..c4a6ca1bde 100644 --- a/typedapi/types/synonymgraphtokenfilter.go +++ b/typedapi/types/synonymgraphtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SynonymGraphTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L109-L119 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L109-L119 type SynonymGraphTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -192,3 +192,13 @@ func NewSynonymGraphTokenFilter() *SynonymGraphTokenFilter { return r } + +// true + +type SynonymGraphTokenFilterVariant interface { + SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter +} + +func (s *SynonymGraphTokenFilter) SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter { + return s +} diff --git a/typedapi/types/synonymrule.go b/typedapi/types/synonymrule.go index 0f1a96c96c..27dde35f95 100644 --- a/typedapi/types/synonymrule.go +++ b/typedapi/types/synonymrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( // SynonymRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/_types/SynonymRule.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/_types/SynonymRule.ts#L26-L37 type SynonymRule struct { - // Id Synonym Rule identifier + // Id The identifier for the synonym rule. + // If you do not specify a synonym rule ID when you create a rule, an identifier + // is created automatically by Elasticsearch. Id *string `json:"id,omitempty"` - // Synonyms Synonyms, in Solr format, that conform the synonym rule. See - // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 + // Synonyms The synonyms that conform the synonym rule in Solr format. Synonyms string `json:"synonyms"` } @@ -75,3 +76,13 @@ func NewSynonymRule() *SynonymRule { return r } + +// true + +type SynonymRuleVariant interface { + SynonymRuleCaster() *SynonymRule +} + +func (s *SynonymRule) SynonymRuleCaster() *SynonymRule { + return s +} diff --git a/typedapi/types/synonymruleread.go b/typedapi/types/synonymruleread.go index 63b8669029..6edad2b6e3 100644 --- a/typedapi/types/synonymruleread.go +++ b/typedapi/types/synonymruleread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // SynonymRuleRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/_types/SynonymRule.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/_types/SynonymRule.ts#L40-L49 type SynonymRuleRead struct { // Id Synonym Rule identifier Id string `json:"id"` @@ -75,3 +75,5 @@ func NewSynonymRuleRead() *SynonymRuleRead { return r } + +// false diff --git a/typedapi/types/synonymssetitem.go b/typedapi/types/synonymssetitem.go index 5afe5a4b5f..0fc7d1457c 100644 --- a/typedapi/types/synonymssetitem.go +++ b/typedapi/types/synonymssetitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // SynonymsSetItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L30-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L36-L45 type SynonymsSetItem struct { // Count Number of synonym rules that the synonym set contains Count int `json:"count"` @@ -86,3 +86,5 @@ func NewSynonymsSetItem() *SynonymsSetItem { return r } + +// false diff --git a/typedapi/types/synonymtokenfilter.go b/typedapi/types/synonymtokenfilter.go index 6f3ef466a3..103c5873d6 100644 --- a/typedapi/types/synonymtokenfilter.go +++ b/typedapi/types/synonymtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // SynonymTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L121-L131 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L121-L131 type SynonymTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -192,3 +192,13 @@ func NewSynonymTokenFilter() *SynonymTokenFilter { return r } + +// true + +type SynonymTokenFilterVariant interface { + SynonymTokenFilterCaster() *SynonymTokenFilter +} + +func (s *SynonymTokenFilter) SynonymTokenFilterCaster() *SynonymTokenFilter { + return s +} diff --git a/typedapi/types/tablevaluescontainer.go b/typedapi/types/tablevaluescontainer.go index ded90aa779..55545aa141 100644 --- a/typedapi/types/tablevaluescontainer.go +++ b/typedapi/types/tablevaluescontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // TableValuesContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/_types/TableValuesContainer.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/TableValuesContainer.ts#L22-L28 type TableValuesContainer struct { - Float64 [][]Float64 `json:"double,omitempty"` - Int [][]int `json:"integer,omitempty"` - Int64 [][]int64 `json:"long,omitempty"` - Keyword [][]string `json:"keyword,omitempty"` + AdditionalTableValuesContainerProperty map[string]json.RawMessage `json:"-"` + Float64 [][]Float64 `json:"double,omitempty"` + Int [][]int `json:"integer,omitempty"` + Int64 [][]int64 `json:"long,omitempty"` + Keyword [][]string `json:"keyword,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s TableValuesContainer) MarshalJSON() ([]byte, error) { + type opt TableValuesContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTableValuesContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTableValuesContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTableValuesContainer returns a TableValuesContainer.
func NewTableValuesContainer() *TableValuesContainer { - r := &TableValuesContainer{} + r := &TableValuesContainer{ + AdditionalTableValuesContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TableValuesContainerVariant interface { + TableValuesContainerCaster() *TableValuesContainer +} + +func (s *TableValuesContainer) TableValuesContainerCaster() *TableValuesContainer { + return s +} diff --git a/typedapi/types/tablevaluesintegervalue.go b/typedapi/types/tablevaluesintegervalue.go index 10e7f95e9e..8828f387ea 100644 --- a/typedapi/types/tablevaluesintegervalue.go +++ b/typedapi/types/tablevaluesintegervalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TableValuesIntegerValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/_types/TableValuesContainer.ts#L30-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/TableValuesContainer.ts#L30-L30 type TableValuesIntegerValue []int + +type TableValuesIntegerValueVariant interface { + TableValuesIntegerValueCaster() *TableValuesIntegerValue +} diff --git a/typedapi/types/tablevalueskeywordvalue.go b/typedapi/types/tablevalueskeywordvalue.go index f8b1767566..24cf18fa6b 100644 --- a/typedapi/types/tablevalueskeywordvalue.go +++ b/typedapi/types/tablevalueskeywordvalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TableValuesKeywordValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/_types/TableValuesContainer.ts#L31-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/TableValuesContainer.ts#L31-L31 type TableValuesKeywordValue []string + +type TableValuesKeywordValueVariant interface { + TableValuesKeywordValueCaster() *TableValuesKeywordValue +} diff --git a/typedapi/types/tablevalueslongdouble.go b/typedapi/types/tablevalueslongdouble.go index bcb0d014f7..9a87856dd1 100644 --- a/typedapi/types/tablevalueslongdouble.go +++ b/typedapi/types/tablevalueslongdouble.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TableValuesLongDouble type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/_types/TableValuesContainer.ts#L33-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/TableValuesContainer.ts#L33-L33 type TableValuesLongDouble []Float64 + +type TableValuesLongDoubleVariant interface { + TableValuesLongDoubleCaster() *TableValuesLongDouble +} diff --git a/typedapi/types/tablevalueslongvalue.go b/typedapi/types/tablevalueslongvalue.go index c40744a0db..4ba7205aa0 100644 --- a/typedapi/types/tablevalueslongvalue.go +++ b/typedapi/types/tablevalueslongvalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TableValuesLongValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/esql/_types/TableValuesContainer.ts#L32-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/esql/_types/TableValuesContainer.ts#L32-L32 type TableValuesLongValue []int64 + +type TableValuesLongValueVariant interface { + TableValuesLongValueCaster() *TableValuesLongValue +} diff --git a/typedapi/types/targetmeanencodingpreprocessor.go b/typedapi/types/targetmeanencodingpreprocessor.go index b8799a3321..693dd2ae2e 100644 --- a/typedapi/types/targetmeanencodingpreprocessor.go +++ b/typedapi/types/targetmeanencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TargetMeanEncodingPreprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L49-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L49-L54 type TargetMeanEncodingPreprocessor struct { DefaultValue Float64 `json:"default_value"` FeatureName string `json:"feature_name"` @@ -110,8 +110,18 @@ func (s *TargetMeanEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewTargetMeanEncodingPreprocessor returns a TargetMeanEncodingPreprocessor. func NewTargetMeanEncodingPreprocessor() *TargetMeanEncodingPreprocessor { r := &TargetMeanEncodingPreprocessor{ - TargetMap: make(map[string]Float64, 0), + TargetMap: make(map[string]Float64), } return r } + +// true + +type TargetMeanEncodingPreprocessorVariant interface { + TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor +} + +func (s *TargetMeanEncodingPreprocessor) TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor { + return s +} diff --git a/typedapi/types/taskfailure.go b/typedapi/types/taskfailure.go index 72a83fcbef..6fcc4d912f 100644 --- a/typedapi/types/taskfailure.go +++ b/typedapi/types/taskfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TaskFailure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Errors.ts#L68-L73 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Errors.ts#L67-L72 type TaskFailure struct { NodeId string `json:"node_id"` Reason ErrorCause `json:"reason"` @@ -102,3 +102,5 @@ func NewTaskFailure() *TaskFailure { return r } + +// false diff --git a/typedapi/types/taskid.go b/typedapi/types/taskid.go index 9d57e6146e..b09c6e48b3 100644 --- a/typedapi/types/taskid.go +++ b/typedapi/types/taskid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L133-L133 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L129-L129 type TaskId any diff --git a/typedapi/types/taskinfo.go b/typedapi/types/taskinfo.go index 3f882cff84..38fccd4ad5 100644 --- a/typedapi/types/taskinfo.go +++ b/typedapi/types/taskinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,11 +31,21 @@ import ( // TaskInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/_types/TaskInfo.ts#L32-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/_types/TaskInfo.ts#L32-L58 type TaskInfo struct { - Action string `json:"action"` - Cancellable bool `json:"cancellable"` - Cancelled *bool `json:"cancelled,omitempty"` + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. + // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. + // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. Description *string `json:"description,omitempty"` Headers map[string]string `json:"headers"` Id int64 `json:"id"` @@ -44,7 +54,13 @@ type TaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - // Status Task status information can vary wildly from task to task. + // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. 
Status json.RawMessage `json:"status,omitempty"` Type string `json:"type"` } @@ -189,8 +205,10 @@ func (s *TaskInfo) UnmarshalJSON(data []byte) error { // NewTaskInfo returns a TaskInfo. func NewTaskInfo() *TaskInfo { r := &TaskInfo{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/taskinfos.go b/typedapi/types/taskinfos.go index 1c97dccadb..c3f87e1330 100644 --- a/typedapi/types/taskinfos.go +++ b/typedapi/types/taskinfos.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // []TaskInfo // map[string]ParentTaskInfo // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 type TaskInfos any diff --git a/typedapi/types/tasksrecord.go b/typedapi/types/tasksrecord.go index 8cba1ccc01..6b8521a290 100644 --- a/typedapi/types/tasksrecord.go +++ b/typedapi/types/tasksrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TasksRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/tasks/types.ts#L22-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/tasks/types.ts#L22-L101 type TasksRecord struct { // Action The task action. Action *string `json:"action,omitempty"` @@ -257,3 +257,5 @@ func NewTasksRecord() *TasksRecord { return r } + +// false diff --git a/typedapi/types/tdigest.go b/typedapi/types/tdigest.go index 1067f89927..e075156ba0 100644 --- a/typedapi/types/tdigest.go +++ b/typedapi/types/tdigest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TDigest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L232-L237 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L232-L237 type TDigest struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -81,3 +81,13 @@ func NewTDigest() *TDigest { return r } + +// true + +type TDigestVariant interface { + TDigestCaster() *TDigest +} + +func (s *TDigest) TDigestCaster() *TDigest { + return s +} diff --git a/typedapi/types/tdigestpercentileranksaggregate.go b/typedapi/types/tdigestpercentileranksaggregate.go index aa744eada0..db698de866 100644 --- a/typedapi/types/tdigestpercentileranksaggregate.go +++ b/typedapi/types/tdigestpercentileranksaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TDigestPercentileRanksAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L177-L178 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L177-L178 type TDigestPercentileRanksAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *TDigestPercentileRanksAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewTDigestPercentileRanksAggregate() *TDigestPercentileRanksAggregate { return r } + +// false diff --git a/typedapi/types/tdigestpercentilesaggregate.go b/typedapi/types/tdigestpercentilesaggregate.go index ec196af8cc..fe65853fca 100644 --- a/typedapi/types/tdigestpercentilesaggregate.go +++ b/typedapi/types/tdigestpercentilesaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TDigestPercentilesAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L174-L175 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L174-L175 type TDigestPercentilesAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *TDigestPercentilesAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewTDigestPercentilesAggregate() *TDigestPercentilesAggregate { return r } + +// false diff --git a/typedapi/types/template.go b/typedapi/types/template.go index a450ed61bc..53b4d64e47 100644 --- a/typedapi/types/template.go +++ b/typedapi/types/template.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // Template type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 type Template struct { Aliases map[string]Alias `json:"aliases"` Mappings TypeMapping `json:"mappings"` @@ -32,8 +32,10 @@ type Template struct { // NewTemplate returns a Template. 
func NewTemplate() *Template { r := &Template{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// false diff --git a/typedapi/types/templateconfig.go b/typedapi/types/templateconfig.go index 09c1aa2132..5cab252ec7 100644 --- a/typedapi/types/templateconfig.go +++ b/typedapi/types/templateconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,12 +31,12 @@ import ( // TemplateConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/msearch_template/types.ts#L28-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/msearch_template/types.ts#L28-L54 type TemplateConfig struct { // Explain If `true`, returns detailed information about score calculation as part of // each hit. Explain *bool `json:"explain,omitempty"` - // Id ID of the search template to use. If no source is specified, + // Id The ID of the search template to use. If no `source` is specified, // this parameter is required. Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. @@ -46,7 +46,8 @@ type TemplateConfig struct { // Profile If `true`, the query execution is profiled. Profile *bool `json:"profile,omitempty"` // Source An inline search template. Supports the same parameters as the search API's - // request body. Also supports Mustache variables. If no id is specified, this + // request body. It also supports Mustache variables. If no `id` is specified, + // this // parameter is required. 
Source *string `json:"source,omitempty"` } @@ -127,8 +128,18 @@ func (s *TemplateConfig) UnmarshalJSON(data []byte) error { // NewTemplateConfig returns a TemplateConfig. func NewTemplateConfig() *TemplateConfig { r := &TemplateConfig{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type TemplateConfigVariant interface { + TemplateConfigCaster() *TemplateConfig +} + +func (s *TemplateConfig) TemplateConfigCaster() *TemplateConfig { + return s +} diff --git a/typedapi/types/templatemapping.go b/typedapi/types/templatemapping.go index 1508ac7bfe..ad76213bed 100644 --- a/typedapi/types/templatemapping.go +++ b/typedapi/types/templatemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TemplateMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/TemplateMapping.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/TemplateMapping.ts#L27-L34 type TemplateMapping struct { Aliases map[string]Alias `json:"aliases"` IndexPatterns []string `json:"index_patterns"` @@ -111,9 +111,11 @@ func (s *TemplateMapping) UnmarshalJSON(data []byte) error { // NewTemplateMapping returns a TemplateMapping. 
func NewTemplateMapping() *TemplateMapping { r := &TemplateMapping{ - Aliases: make(map[string]Alias, 0), - Settings: make(map[string]json.RawMessage, 0), + Aliases: make(map[string]Alias), + Settings: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/templatesrecord.go b/typedapi/types/templatesrecord.go index 7726b686e3..8f360442ac 100644 --- a/typedapi/types/templatesrecord.go +++ b/typedapi/types/templatesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TemplatesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/templates/types.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/templates/types.ts#L22-L48 type TemplatesRecord struct { // ComposedOf The component templates that comprise the index template. ComposedOf *string `json:"composed_of,omitempty"` @@ -117,3 +117,5 @@ func NewTemplatesRecord() *TemplatesRecord { return r } + +// false diff --git a/typedapi/types/term.go b/typedapi/types/term.go index 10ef71a85e..640181698e 100644 --- a/typedapi/types/term.go +++ b/typedapi/types/term.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Term type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/types.ts#L34-L40 type Term struct { DocFreq *int `json:"doc_freq,omitempty"` Score *Float64 `json:"score,omitempty"` @@ -135,3 +135,5 @@ func NewTerm() *Term { return r } + +// false diff --git a/typedapi/types/terminateprocessor.go b/typedapi/types/terminateprocessor.go index 42ebcaa334..653dbd0677 100644 --- a/typedapi/types/terminateprocessor.go +++ b/typedapi/types/terminateprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,13 @@ import ( // TerminateProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1483-L1483 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1524-L1524 type TerminateProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. 
@@ -75,16 +75,9 @@ func (s *TerminateProcessor) UnmarshalJSON(data []byte) error { s.Description = &o case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -128,3 +121,13 @@ func NewTerminateProcessor() *TerminateProcessor { return r } + +// true + +type TerminateProcessorVariant interface { + TerminateProcessorCaster() *TerminateProcessor +} + +func (s *TerminateProcessor) TerminateProcessorCaster() *TerminateProcessor { + return s +} diff --git a/typedapi/types/termquery.go b/typedapi/types/termquery.go index d986eb9d31..8231e8b451 100644 --- a/typedapi/types/termquery.go +++ b/typedapi/types/termquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L238-L255 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L238-L255 type TermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -127,3 +127,13 @@ func NewTermQuery() *TermQuery { return r } + +// true + +type TermQueryVariant interface { + TermQueryCaster() *TermQuery +} + +func (s *TermQuery) TermQueryCaster() *TermQuery { + return s +} diff --git a/typedapi/types/termrangequery.go b/typedapi/types/termrangequery.go index 5602be74db..063b3a3505 100644 --- a/typedapi/types/termrangequery.go +++ b/typedapi/types/termrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // TermRangeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L174-L174 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L174-L174 type TermRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -187,3 +187,13 @@ func NewTermRangeQuery() *TermRangeQuery { return r } + +// true + +type TermRangeQueryVariant interface { + TermRangeQueryCaster() *TermRangeQuery +} + +func (s *TermRangeQuery) TermRangeQueryCaster() *TermRangeQuery { + return s +} diff --git a/typedapi/types/termsaggregatebasedoubletermsbucket.go b/typedapi/types/termsaggregatebasedoubletermsbucket.go deleted file mode 100644 index b386b3a215..0000000000 --- a/typedapi/types/termsaggregatebasedoubletermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseDoubleTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L417-L422 -type TermsAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DoubleTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := 
[]DoubleTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseDoubleTermsBucket returns a TermsAggregateBaseDoubleTermsBucket. -func NewTermsAggregateBaseDoubleTermsBucket() *TermsAggregateBaseDoubleTermsBucket { - r := &TermsAggregateBaseDoubleTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebaselongtermsbucket.go b/typedapi/types/termsaggregatebaselongtermsbucket.go deleted file mode 100644 index 7ced53cb51..0000000000 --- a/typedapi/types/termsaggregatebaselongtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L417-L422 -type TermsAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var 
tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseLongTermsBucket returns a TermsAggregateBaseLongTermsBucket. -func NewTermsAggregateBaseLongTermsBucket() *TermsAggregateBaseLongTermsBucket { - r := &TermsAggregateBaseLongTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasemultitermsbucket.go b/typedapi/types/termsaggregatebasemultitermsbucket.go deleted file mode 100644 index 7deae39f25..0000000000 --- a/typedapi/types/termsaggregatebasemultitermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseMultiTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L417-L422 -type TermsAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]MultiTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []MultiTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case 
"meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseMultiTermsBucket returns a TermsAggregateBaseMultiTermsBucket. -func NewTermsAggregateBaseMultiTermsBucket() *TermsAggregateBaseMultiTermsBucket { - r := &TermsAggregateBaseMultiTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasestringtermsbucket.go b/typedapi/types/termsaggregatebasestringtermsbucket.go deleted file mode 100644 index 7b833efbc0..0000000000 --- a/typedapi/types/termsaggregatebasestringtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseStringTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L417-L422 -type TermsAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch 
v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseStringTermsBucket returns a TermsAggregateBaseStringTermsBucket. -func NewTermsAggregateBaseStringTermsBucket() *TermsAggregateBaseStringTermsBucket { - r := &TermsAggregateBaseStringTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasevoid.go b/typedapi/types/termsaggregatebasevoid.go deleted file mode 100644 index 3aa5b205ff..0000000000 --- a/typedapi/types/termsaggregatebasevoid.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseVoid type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L417-L422 -type TermsAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]any, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []any{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseVoid returns a 
TermsAggregateBaseVoid. -func NewTermsAggregateBaseVoid() *TermsAggregateBaseVoid { - r := &TermsAggregateBaseVoid{} - - return r -} diff --git a/typedapi/types/termsaggregation.go b/typedapi/types/termsaggregation.go index e8ef447c4c..852386d548 100644 --- a/typedapi/types/termsaggregation.go +++ b/typedapi/types/termsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -36,7 +36,7 @@ import ( // TermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L963-L1031 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L963-L1031 type TermsAggregation struct { // CollectMode Determines how child aggregations should be calculated: breadth-first or // depth-first. @@ -326,3 +326,13 @@ func NewTermsAggregation() *TermsAggregation { return r } + +// true + +type TermsAggregationVariant interface { + TermsAggregationCaster() *TermsAggregation +} + +func (s *TermsAggregation) TermsAggregationCaster() *TermsAggregation { + return s +} diff --git a/typedapi/types/termsexclude.go b/typedapi/types/termsexclude.go index b947e38969..fd0718c8d8 100644 --- a/typedapi/types/termsexclude.go +++ b/typedapi/types/termsexclude.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TermsExclude type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1077-L1078 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1077-L1078 type TermsExclude []string + +type TermsExcludeVariant interface { + TermsExcludeCaster() *TermsExclude +} diff --git a/typedapi/types/termsgrouping.go b/typedapi/types/termsgrouping.go index 268b504978..deabb80e88 100644 --- a/typedapi/types/termsgrouping.go +++ b/typedapi/types/termsgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TermsGrouping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/rollup/_types/Groupings.ts#L75-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/rollup/_types/Groupings.ts#L75-L82 type TermsGrouping struct { // Fields The set of fields that you wish to collect terms for. // This array can contain fields that are both keyword and numerics. @@ -80,3 +80,13 @@ func NewTermsGrouping() *TermsGrouping { return r } + +// true + +type TermsGroupingVariant interface { + TermsGroupingCaster() *TermsGrouping +} + +func (s *TermsGrouping) TermsGroupingCaster() *TermsGrouping { + return s +} diff --git a/typedapi/types/termsinclude.go b/typedapi/types/termsinclude.go index 7f2cafca9d..e4d55d39cf 100644 --- a/typedapi/types/termsinclude.go +++ b/typedapi/types/termsinclude.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,5 +26,9 @@ package types // []string // TermsPartition // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1074-L1075 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1074-L1075 type TermsInclude any + +type TermsIncludeVariant interface { + TermsIncludeCaster() *TermsInclude +} diff --git a/typedapi/types/termslookup.go b/typedapi/types/termslookup.go index 764ccba5df..af83e9f74f 100644 --- a/typedapi/types/termslookup.go +++ b/typedapi/types/termslookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TermsLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L270-L275 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L270-L275 type TermsLookup struct { Id string `json:"id"` Index string `json:"index"` @@ -84,3 +84,13 @@ func NewTermsLookup() *TermsLookup { return r } + +// true + +type TermsLookupVariant interface { + TermsLookupCaster() *TermsLookup +} + +func (s *TermsLookup) TermsLookupCaster() *TermsLookup { + return s +} diff --git a/typedapi/types/termspartition.go b/typedapi/types/termspartition.go index cabcf6fc22..44db05e6b2 100644 --- a/typedapi/types/termspartition.go +++ b/typedapi/types/termspartition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermsPartition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1080-L1089 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1080-L1089 type TermsPartition struct { // NumPartitions The number of partitions. 
NumPartitions int64 `json:"num_partitions"` @@ -95,3 +95,13 @@ func NewTermsPartition() *TermsPartition { return r } + +// true + +type TermsPartitionVariant interface { + TermsPartitionCaster() *TermsPartition +} + +func (s *TermsPartition) TermsPartitionCaster() *TermsPartition { + return s +} diff --git a/typedapi/types/termsquery.go b/typedapi/types/termsquery.go index 86c1b9f41c..0c4b042330 100644 --- a/typedapi/types/termsquery.go +++ b/typedapi/types/termsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L257-L263 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L257-L263 type TermsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -136,8 +136,18 @@ func (s TermsQuery) MarshalJSON() ([]byte, error) { // NewTermsQuery returns a TermsQuery. func NewTermsQuery() *TermsQuery { r := &TermsQuery{ - TermsQuery: make(map[string]TermsQueryField, 0), + TermsQuery: make(map[string]TermsQueryField), } return r } + +// true + +type TermsQueryVariant interface { + TermsQueryCaster() *TermsQuery +} + +func (s *TermsQuery) TermsQueryCaster() *TermsQuery { + return s +} diff --git a/typedapi/types/termsqueryfield.go b/typedapi/types/termsqueryfield.go index e01c10eb22..8b26a74804 100644 --- a/typedapi/types/termsqueryfield.go +++ b/typedapi/types/termsqueryfield.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // []FieldValue // TermsLookup // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L265-L268 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L265-L268 type TermsQueryField any + +type TermsQueryFieldVariant interface { + TermsQueryFieldCaster() *TermsQueryField +} diff --git a/typedapi/types/termssetquery.go b/typedapi/types/termssetquery.go index cbcd86d031..e78ed6dc40 100644 --- a/typedapi/types/termssetquery.go +++ b/typedapi/types/termssetquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermsSetQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L277-L299 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L277-L299 type TermsSetQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -127,3 +127,13 @@ func NewTermsSetQuery() *TermsSetQuery { return r } + +// true + +type TermsSetQueryVariant interface { + TermsSetQueryCaster() *TermsSetQuery +} + +func (s *TermsSetQuery) TermsSetQueryCaster() *TermsSetQuery { + return s +} diff --git a/typedapi/types/termsuggest.go b/typedapi/types/termsuggest.go index f019948643..aa512c3cd1 100644 --- a/typedapi/types/termsuggest.go +++ b/typedapi/types/termsuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermSuggest type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L64-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L64-L69 type TermSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewTermSuggest() *TermSuggest { return r } + +// false diff --git a/typedapi/types/termsuggester.go b/typedapi/types/termsuggester.go index c3ab072e0e..f9f690110c 100644 --- a/typedapi/types/termsuggester.go +++ b/typedapi/types/termsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -35,7 +35,7 @@ import ( // TermSuggester type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L506-L568 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L506-L568 type TermSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. @@ -300,3 +300,13 @@ func NewTermSuggester() *TermSuggester { return r } + +// true + +type TermSuggesterVariant interface { + TermSuggesterCaster() *TermSuggester +} + +func (s *TermSuggester) TermSuggesterCaster() *TermSuggester { + return s +} diff --git a/typedapi/types/termsuggestoption.go b/typedapi/types/termsuggestoption.go index ebacc88f14..6415af95ef 100644 --- a/typedapi/types/termsuggestoption.go +++ b/typedapi/types/termsuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/suggester.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/suggester.ts#L93-L99 type TermSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Freq int64 `json:"freq"` @@ -135,3 +135,5 @@ func NewTermSuggestOption() *TermSuggestOption { return r } + +// false diff --git a/typedapi/types/termvector.go b/typedapi/types/termvector.go index 046fed883c..0c10be7687 100644 --- a/typedapi/types/termvector.go +++ b/typedapi/types/termvector.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TermVector type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/types.ts#L23-L26 type TermVector struct { FieldStatistics *FieldStatistics `json:"field_statistics,omitempty"` Terms map[string]Term `json:"terms"` @@ -31,8 +31,10 @@ type TermVector struct { // NewTermVector returns a TermVector. func NewTermVector() *TermVector { r := &TermVector{ - Terms: make(map[string]Term, 0), + Terms: make(map[string]Term), } return r } + +// false diff --git a/typedapi/types/termvectorsfilter.go b/typedapi/types/termvectorsfilter.go index 250dd7f523..4bb0abdefc 100644 --- a/typedapi/types/termvectorsfilter.go +++ b/typedapi/types/termvectorsfilter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,15 +31,15 @@ import ( // TermVectorsFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/types.ts#L49-L86 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/types.ts#L49-L86 type TermVectorsFilter struct { // MaxDocFreq Ignore words which occur in more than this many docs. // Defaults to unbounded. MaxDocFreq *int `json:"max_doc_freq,omitempty"` - // MaxNumTerms Maximum number of terms that must be returned per field. + // MaxNumTerms The maximum number of terms that must be returned per field. MaxNumTerms *int `json:"max_num_terms,omitempty"` // MaxTermFreq Ignore words with more than this frequency in the source doc. - // Defaults to unbounded. + // It defaults to unbounded. MaxTermFreq *int `json:"max_term_freq,omitempty"` // MaxWordLength The maximum word length above which words will be ignored. // Defaults to unbounded. @@ -190,3 +190,13 @@ func NewTermVectorsFilter() *TermVectorsFilter { return r } + +// true + +type TermVectorsFilterVariant interface { + TermVectorsFilterCaster() *TermVectorsFilter +} + +func (s *TermVectorsFilter) TermVectorsFilterCaster() *TermVectorsFilter { + return s +} diff --git a/typedapi/types/termvectorsresult.go b/typedapi/types/termvectorsresult.go index 8738a03854..de75fbc07a 100644 --- a/typedapi/types/termvectorsresult.go +++ b/typedapi/types/termvectorsresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermVectorsResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/mtermvectors/types.ts#L96-L104 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/mtermvectors/types.ts#L96-L104 type TermVectorsResult struct { Error *ErrorCause `json:"error,omitempty"` Found *bool `json:"found,omitempty"` @@ -122,8 +122,10 @@ func (s *TermVectorsResult) UnmarshalJSON(data []byte) error { // NewTermVectorsResult returns a TermVectorsResult. func NewTermVectorsResult() *TermVectorsResult { r := &TermVectorsResult{ - TermVectors: make(map[string]TermVector, 0), + TermVectors: make(map[string]TermVector), } return r } + +// false diff --git a/typedapi/types/termvectorstoken.go b/typedapi/types/termvectorstoken.go index dfd5fd1ae9..1869d96a9a 100644 --- a/typedapi/types/termvectorstoken.go +++ b/typedapi/types/termvectorstoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TermVectorsToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/termvectors/types.ts#L42-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/termvectors/types.ts#L42-L47 type TermVectorsToken struct { EndOffset *int `json:"end_offset,omitempty"` Payload *string `json:"payload,omitempty"` @@ -125,3 +125,5 @@ func NewTermVectorsToken() *TermVectorsToken { return r } + +// false diff --git a/typedapi/types/testpopulation.go b/typedapi/types/testpopulation.go index f765749086..2fca9ab485 100644 --- a/typedapi/types/testpopulation.go +++ b/typedapi/types/testpopulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TestPopulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L319-L329 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L319-L329 type TestPopulation struct { // Field The field to aggregate. 
Field string `json:"field"` @@ -80,3 +80,13 @@ func NewTestPopulation() *TestPopulation { return r } + +// true + +type TestPopulationVariant interface { + TestPopulationCaster() *TestPopulation +} + +func (s *TestPopulation) TestPopulationCaster() *TestPopulation { + return s +} diff --git a/typedapi/types/textclassificationinferenceoptions.go b/typedapi/types/textclassificationinferenceoptions.go index a1ca40e5b0..d98a805d66 100644 --- a/typedapi/types/textclassificationinferenceoptions.go +++ b/typedapi/types/textclassificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextClassificationInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L189-L199 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L173-L183 type TextClassificationInferenceOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. 
Must have the // same deminsions as the default configured labels @@ -109,3 +109,13 @@ func NewTextClassificationInferenceOptions() *TextClassificationInferenceOptions return r } + +// true + +type TextClassificationInferenceOptionsVariant interface { + TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions +} + +func (s *TextClassificationInferenceOptions) TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/textclassificationinferenceupdateoptions.go b/typedapi/types/textclassificationinferenceupdateoptions.go index f1aa526689..9426502de2 100644 --- a/typedapi/types/textclassificationinferenceupdateoptions.go +++ b/typedapi/types/textclassificationinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextClassificationInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L363-L372 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L351-L360 type TextClassificationInferenceUpdateOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. 
Must have the // same deminsions as the default configured labels @@ -109,3 +109,13 @@ func NewTextClassificationInferenceUpdateOptions() *TextClassificationInferenceU return r } + +// true + +type TextClassificationInferenceUpdateOptionsVariant interface { + TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions +} + +func (s *TextClassificationInferenceUpdateOptions) TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textembedding.go b/typedapi/types/textembedding.go index ecc95e9aae..fe38ef5845 100644 --- a/typedapi/types/textembedding.go +++ b/typedapi/types/textembedding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextEmbedding type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Knn.ts#L79-L82 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Knn.ts#L94-L97 type TextEmbedding struct { ModelId string `json:"model_id"` ModelText string `json:"model_text"` @@ -87,3 +87,13 @@ func NewTextEmbedding() *TextEmbedding { return r } + +// true + +type TextEmbeddingVariant interface { + TextEmbeddingCaster() *TextEmbedding +} + +func (s *TextEmbedding) TextEmbeddingCaster() *TextEmbedding { + return s +} diff --git a/typedapi/types/textembeddingbyteresult.go b/typedapi/types/textembeddingbyteresult.go index 269ebdd286..651675eba2 100644 --- a/typedapi/types/textembeddingbyteresult.go +++ b/typedapi/types/textembeddingbyteresult.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TextEmbeddingByteResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L46-L51 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L53-L58 type TextEmbeddingByteResult struct { Embedding []byte `json:"embedding"` } @@ -66,3 +66,5 @@ func NewTextEmbeddingByteResult() *TextEmbeddingByteResult { return r } + +// false diff --git a/typedapi/types/textembeddinginferenceoptions.go b/typedapi/types/textembeddinginferenceoptions.go index e3db291284..88cd5c94e5 100644 --- a/typedapi/types/textembeddinginferenceoptions.go +++ b/typedapi/types/textembeddinginferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextEmbeddingInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L237-L245 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L221-L231 type TextEmbeddingInferenceOptions struct { // EmbeddingSize The number of dimensions in the embedding output EmbeddingSize *int `json:"embedding_size,omitempty"` @@ -40,6 +40,7 @@ type TextEmbeddingInferenceOptions struct { ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { @@ -90,6 +91,11 @@ func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -101,3 +107,13 @@ func NewTextEmbeddingInferenceOptions() *TextEmbeddingInferenceOptions { return r } + +// true + +type TextEmbeddingInferenceOptionsVariant interface { + TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions +} + +func (s *TextEmbeddingInferenceOptions) TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions { + return s +} diff --git a/typedapi/types/textembeddinginferenceupdateoptions.go b/typedapi/types/textembeddinginferenceupdateoptions.go index bfeab4242d..92ce61fed7 100644 --- a/typedapi/types/textembeddinginferenceupdateoptions.go +++ b/typedapi/types/textembeddinginferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextEmbeddingInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L392-L396 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L380-L384 type TextEmbeddingInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -82,3 +82,13 @@ func NewTextEmbeddingInferenceUpdateOptions() *TextEmbeddingInferenceUpdateOptio return r } + +// true + +type TextEmbeddingInferenceUpdateOptionsVariant interface { + TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions +} + +func (s *TextEmbeddingInferenceUpdateOptions) TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textembeddingresult.go b/typedapi/types/textembeddingresult.go index a90555cf0d..78a5b666dc 100644 --- a/typedapi/types/textembeddingresult.go +++ b/typedapi/types/textembeddingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TextEmbeddingResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/inference/_types/Results.ts#L53-L58 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/_types/Results.ts#L60-L65 type TextEmbeddingResult struct { Embedding []float32 `json:"embedding"` } @@ -66,3 +66,5 @@ func NewTextEmbeddingResult() *TextEmbeddingResult { return r } + +// false diff --git a/typedapi/types/textexpansioninferenceoptions.go b/typedapi/types/textexpansioninferenceoptions.go index a1d573c80c..f3332d102c 100644 --- a/typedapi/types/textexpansioninferenceoptions.go +++ b/typedapi/types/textexpansioninferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,13 +31,14 @@ import ( // TextExpansionInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L247-L253 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L233-L240 type TextExpansionInferenceOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { @@ -72,6 +73,11 @@ func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -83,3 +89,13 @@ func NewTextExpansionInferenceOptions() *TextExpansionInferenceOptions { return r } + +// true + +type TextExpansionInferenceOptionsVariant interface { + TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions +} + +func (s *TextExpansionInferenceOptions) TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions { + return s +} diff --git a/typedapi/types/textexpansioninferenceupdateoptions.go b/typedapi/types/textexpansioninferenceupdateoptions.go index 5f4e6e3e63..7a2a07d8e3 100644 --- a/typedapi/types/textexpansioninferenceupdateoptions.go +++ b/typedapi/types/textexpansioninferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextExpansionInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L398-L402 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L386-L390 type TextExpansionInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -82,3 +82,13 @@ func NewTextExpansionInferenceUpdateOptions() *TextExpansionInferenceUpdateOptio return r } + +// true + +type TextExpansionInferenceUpdateOptionsVariant interface { + TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions +} + +func (s *TextExpansionInferenceUpdateOptions) TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textexpansionquery.go b/typedapi/types/textexpansionquery.go index ae3170087c..fb5a3451f8 100644 --- a/typedapi/types/textexpansionquery.go +++ b/typedapi/types/textexpansionquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextExpansionQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/TextExpansionQuery.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/TextExpansionQuery.ts#L23-L36 type TextExpansionQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -131,3 +131,13 @@ func NewTextExpansionQuery() *TextExpansionQuery { return r } + +// true + +type TextExpansionQueryVariant interface { + TextExpansionQueryCaster() *TextExpansionQuery +} + +func (s *TextExpansionQuery) TextExpansionQueryCaster() *TextExpansionQuery { + return s +} diff --git a/typedapi/types/textindexprefixes.go b/typedapi/types/textindexprefixes.go index c1940804c3..dd85eab3ac 100644 --- a/typedapi/types/textindexprefixes.go +++ b/typedapi/types/textindexprefixes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextIndexPrefixes type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L265-L268 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L281-L284 type TextIndexPrefixes struct { MaxChars int `json:"max_chars"` MinChars int `json:"min_chars"` @@ -95,3 +95,13 @@ func NewTextIndexPrefixes() *TextIndexPrefixes { return r } + +// true + +type TextIndexPrefixesVariant interface { + TextIndexPrefixesCaster() *TextIndexPrefixes +} + +func (s *TextIndexPrefixes) TextIndexPrefixesCaster() *TextIndexPrefixes { + return s +} diff --git a/typedapi/types/textproperty.go b/typedapi/types/textproperty.go index a651aa2a30..d6f09bf737 100644 --- a/typedapi/types/textproperty.go +++ b/typedapi/types/textproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // TextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L270-L287 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L286-L303 type TextProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -51,16 +52,17 @@ type TextProperty struct { IndexPhrases *bool `json:"index_phrases,omitempty"` IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - PositionIncrementGap *int `json:"position_increment_gap,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` } func (s *TextProperty) UnmarshalJSON(data []byte) error { @@ -179,301 +181,313 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -590,301 +604,313 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -940,6 +966,11 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -980,6 +1011,7 @@ func (s TextProperty) MarshalJSON() ([]byte, error) { SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, Type: s.Type, } @@ -992,10 +1024,20 @@ func (s TextProperty) MarshalJSON() ([]byte, error) { // 
NewTextProperty returns a TextProperty. func NewTextProperty() *TextProperty { r := &TextProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type TextPropertyVariant interface { + TextPropertyCaster() *TextProperty +} + +func (s *TextProperty) TextPropertyCaster() *TextProperty { + return s +} diff --git a/typedapi/types/textsimilarityreranker.go b/typedapi/types/textsimilarityreranker.go index bc0c14ba48..203685d973 100644 --- a/typedapi/types/textsimilarityreranker.go +++ b/typedapi/types/textsimilarityreranker.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TextSimilarityReranker type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Retriever.ts#L88-L99 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Retriever.ts#L93-L104 type TextSimilarityReranker struct { // Field The document field to be used for text similarity comparisons. This field // should contain the text that will be evaluated against the inference_text @@ -50,7 +50,7 @@ type TextSimilarityReranker struct { RankWindowSize *int `json:"rank_window_size,omitempty"` // Retriever The nested retriever which will produce the first-level results, that will // later be used for reranking. 
- Retriever *RetrieverContainer `json:"retriever,omitempty"` + Retriever RetrieverContainer `json:"retriever"` } func (s *TextSimilarityReranker) UnmarshalJSON(data []byte) error { @@ -168,3 +168,13 @@ func NewTextSimilarityReranker() *TextSimilarityReranker { return r } + +// true + +type TextSimilarityRerankerVariant interface { + TextSimilarityRerankerCaster() *TextSimilarityReranker +} + +func (s *TextSimilarityReranker) TextSimilarityRerankerCaster() *TextSimilarityReranker { + return s +} diff --git a/typedapi/types/texttoanalyze.go b/typedapi/types/texttoanalyze.go index 7d2acc82fb..3f81081993 100644 --- a/typedapi/types/texttoanalyze.go +++ b/typedapi/types/texttoanalyze.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TextToAnalyze type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L69-L69 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L69-L69 type TextToAnalyze []string + +type TextToAnalyzeVariant interface { + TextToAnalyzeCaster() *TextToAnalyze +} diff --git a/typedapi/types/thaianalyzer.go b/typedapi/types/thaianalyzer.go index 1425580773..6453c57b8e 100644 --- a/typedapi/types/thaianalyzer.go +++ b/typedapi/types/thaianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ThaiAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L306-L310 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L317-L321 type ThaiAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -111,3 +111,13 @@ func NewThaiAnalyzer() *ThaiAnalyzer { return r } + +// true + +type ThaiAnalyzerVariant interface { + ThaiAnalyzerCaster() *ThaiAnalyzer +} + +func (s *ThaiAnalyzer) ThaiAnalyzerCaster() *ThaiAnalyzer { + return s +} diff --git a/typedapi/types/thaitokenizer.go b/typedapi/types/thaitokenizer.go index 94d75d5967..97fe89cb0f 100644 --- a/typedapi/types/thaitokenizer.go +++ b/typedapi/types/thaitokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // ThaiTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L126-L128 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L126-L128 type ThaiTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewThaiTokenizer() *ThaiTokenizer { return r } + +// true + +type ThaiTokenizerVariant interface { + ThaiTokenizerCaster() *ThaiTokenizer +} + +func (s *ThaiTokenizer) ThaiTokenizerCaster() *ThaiTokenizer { + return s +} diff --git a/typedapi/types/threadcount.go b/typedapi/types/threadcount.go index f9184302cc..cfa79e5147 100644 --- a/typedapi/types/threadcount.go +++ b/typedapi/types/threadcount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ThreadCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1075-L1100 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1075-L1100 type ThreadCount struct { // Active Number of active threads in the thread pool. Active *int64 `json:"active,omitempty"` @@ -163,3 +163,5 @@ func NewThreadCount() *ThreadCount { return r } + +// false diff --git a/typedapi/types/threadpoolrecord.go b/typedapi/types/threadpoolrecord.go index 10e902e94e..7d22e9a2d1 100644 --- a/typedapi/types/threadpoolrecord.go +++ b/typedapi/types/threadpoolrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ThreadPoolRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/thread_pool/types.ts#L22-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/thread_pool/types.ts#L22-L124 type ThreadPoolRecord struct { // Active The number of active threads in the current thread pool. Active *string `json:"active,omitempty"` @@ -336,3 +336,5 @@ func NewThreadPoolRecord() *ThreadPoolRecord { return r } + +// false diff --git a/typedapi/types/throttlestate.go b/typedapi/types/throttlestate.go index c1fbd6a933..ae7f4e1141 100644 --- a/typedapi/types/throttlestate.go +++ b/typedapi/types/throttlestate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ThrottleState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L120-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L120-L123 type ThrottleState struct { Reason string `json:"reason"` Timestamp DateTime `json:"timestamp"` @@ -80,3 +80,13 @@ func NewThrottleState() *ThrottleState { return r } + +// true + +type ThrottleStateVariant interface { + ThrottleStateCaster() *ThrottleState +} + +func (s *ThrottleState) ThrottleStateCaster() *ThrottleState { + return s +} diff --git a/typedapi/types/timehttphistogram.go b/typedapi/types/timehttphistogram.go index 9adb660b84..360d4313fe 100644 --- a/typedapi/types/timehttphistogram.go +++ b/typedapi/types/timehttphistogram.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TimeHttpHistogram type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L708-L712 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L708-L712 type TimeHttpHistogram struct { Count int64 `json:"count"` GeMillis *int64 `json:"ge_millis,omitempty"` @@ -109,3 +109,5 @@ func NewTimeHttpHistogram() *TimeHttpHistogram { return r } + +// false diff --git a/typedapi/types/timeofmonth.go b/typedapi/types/timeofmonth.go index 2a4b1e358b..d8cddcb6d2 100644 --- a/typedapi/types/timeofmonth.go +++ b/typedapi/types/timeofmonth.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TimeOfMonth type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L110-L113 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L111-L114 type TimeOfMonth struct { At []string `json:"at"` On []int `json:"on"` @@ -34,3 +34,13 @@ func NewTimeOfMonth() *TimeOfMonth { return r } + +// true + +type TimeOfMonthVariant interface { + TimeOfMonthCaster() *TimeOfMonth +} + +func (s *TimeOfMonth) TimeOfMonthCaster() *TimeOfMonth { + return s +} diff --git a/typedapi/types/timeofweek.go b/typedapi/types/timeofweek.go index 7fa8b73bd9..b1535da4fe 100644 --- a/typedapi/types/timeofweek.go +++ b/typedapi/types/timeofweek.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // TimeOfWeek type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L116-L119 type TimeOfWeek struct { At []string `json:"at"` On []day.Day `json:"on"` @@ -38,3 +38,13 @@ func NewTimeOfWeek() *TimeOfWeek { return r } + +// true + +type TimeOfWeekVariant interface { + TimeOfWeekCaster() *TimeOfWeek +} + +func (s *TimeOfWeek) TimeOfWeekCaster() *TimeOfWeek { + return s +} diff --git a/typedapi/types/timeofyear.go b/typedapi/types/timeofyear.go index 89b4308341..8a47fce155 100644 --- a/typedapi/types/timeofyear.go +++ b/typedapi/types/timeofyear.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // TimeOfYear type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Schedule.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Schedule.ts#L121-L125 type TimeOfYear struct { At []string `json:"at"` Int []month.Month `json:"int"` @@ -39,3 +39,13 @@ func NewTimeOfYear() *TimeOfYear { return r } + +// true + +type TimeOfYearVariant interface { + TimeOfYearCaster() *TimeOfYear +} + +func (s *TimeOfYear) TimeOfYearCaster() *TimeOfYear { + return s +} diff --git a/typedapi/types/timeseriesaggregate.go b/typedapi/types/timeseriesaggregate.go index 774d644781..f34881b7da 100644 --- a/typedapi/types/timeseriesaggregate.go +++ b/typedapi/types/timeseriesaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TimeSeriesAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L730-L731 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L730-L731 type TimeSeriesAggregate struct { Buckets BucketsTimeSeriesBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewTimeSeriesAggregate() *TimeSeriesAggregate { return r } + +// false diff --git a/typedapi/types/timeseriesaggregation.go b/typedapi/types/timeseriesaggregation.go index ac43e7c232..1c3eec7cbc 100644 --- a/typedapi/types/timeseriesaggregation.go +++ b/typedapi/types/timeseriesaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TimeSeriesAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1033-L1046 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1033-L1046 type TimeSeriesAggregation struct { // Keyed Set to `true` to associate a unique string key with each bucket and returns // the ranges as a hash rather than an array. 
@@ -96,3 +96,13 @@ func NewTimeSeriesAggregation() *TimeSeriesAggregation { return r } + +// true + +type TimeSeriesAggregationVariant interface { + TimeSeriesAggregationCaster() *TimeSeriesAggregation +} + +func (s *TimeSeriesAggregation) TimeSeriesAggregationCaster() *TimeSeriesAggregation { + return s +} diff --git a/typedapi/types/timeseriesbucket.go b/typedapi/types/timeseriesbucket.go index 55c909c81e..db32c16615 100644 --- a/typedapi/types/timeseriesbucket.go +++ b/typedapi/types/timeseriesbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // TimeSeriesBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L733-L735 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L733-L735 type TimeSeriesBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -634,9 +634,11 @@ func (s TimeSeriesBucket) MarshalJSON() ([]byte, error) { // NewTimeSeriesBucket returns a TimeSeriesBucket. func NewTimeSeriesBucket() *TimeSeriesBucket { r := &TimeSeriesBucket{ - Aggregations: make(map[string]Aggregate, 0), - Key: make(map[string]FieldValue, 0), + Aggregations: make(map[string]Aggregate), + Key: make(map[string]FieldValue), } return r } + +// false diff --git a/typedapi/types/timesync.go b/typedapi/types/timesync.go index 45aedfc0a8..b05dbacfd8 100644 --- a/typedapi/types/timesync.go +++ b/typedapi/types/timesync.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TimeSync type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L177-L189 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L177-L189 type TimeSync struct { // Delay The time delay between the current time and the latest input data time. Delay Duration `json:"delay,omitempty"` @@ -78,3 +78,13 @@ func NewTimeSync() *TimeSync { return r } + +// true + +type TimeSyncVariant interface { + TimeSyncCaster() *TimeSync +} + +func (s *TimeSync) TimeSyncCaster() *TimeSync { + return s +} diff --git a/typedapi/types/timingstats.go b/typedapi/types/timingstats.go index 5834f1c42c..83aad5282c 100644 --- a/typedapi/types/timingstats.go +++ b/typedapi/types/timingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L562-L567 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L564-L569 type TimingStats struct { // ElapsedTime Runtime of the analysis in milliseconds. 
ElapsedTime int64 `json:"elapsed_time"` @@ -74,3 +74,5 @@ func NewTimingStats() *TimingStats { return r } + +// false diff --git a/typedapi/types/tokencountproperty.go b/typedapi/types/tokencountproperty.go index f0fa7c5642..8f9daf952a 100644 --- a/typedapi/types/tokencountproperty.go +++ b/typedapi/types/tokencountproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // TokenCountProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/specialized.ts#L85-L92 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/specialized.ts#L94-L101 type TokenCountProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -45,11 +46,12 @@ type TokenCountProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { @@ -163,301 +165,313 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -536,301 +550,313 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo 
:= NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := 
NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo 
:= NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -850,6 +876,11 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -877,6 +908,7 @@ func (s TokenCountProperty) MarshalJSON() ([]byte, error) { NullValue: s.NullValue, Properties: s.Properties, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -888,10 +920,20 @@ func (s TokenCountProperty) MarshalJSON() ([]byte, error) { // NewTokenCountProperty returns a TokenCountProperty. 
func NewTokenCountProperty() *TokenCountProperty { r := &TokenCountProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type TokenCountPropertyVariant interface { + TokenCountPropertyCaster() *TokenCountProperty +} + +func (s *TokenCountProperty) TokenCountPropertyCaster() *TokenCountProperty { + return s +} diff --git a/typedapi/types/tokendetail.go b/typedapi/types/tokendetail.go index c217ec1d99..ae6f588f07 100644 --- a/typedapi/types/tokendetail.go +++ b/typedapi/types/tokendetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TokenDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/analyze/types.ts#L71-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/analyze/types.ts#L71-L74 type TokenDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` @@ -80,3 +80,5 @@ func NewTokenDetail() *TokenDetail { return r } + +// false diff --git a/typedapi/types/tokenfilter.go b/typedapi/types/tokenfilter.go index 1c23f61f33..7685d0ecb2 100644 --- a/typedapi/types/tokenfilter.go +++ b/typedapi/types/tokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // TokenFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L345-L350 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L345-L350 type TokenFilter any + +type TokenFilterVariant interface { + TokenFilterCaster() *TokenFilter +} diff --git a/typedapi/types/tokenfilterdefinition.go b/typedapi/types/tokenfilterdefinition.go index d10d151429..7c304bc981 100644 --- a/typedapi/types/tokenfilterdefinition.go +++ b/typedapi/types/tokenfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -70,5 +70,9 @@ package types // PhoneticTokenFilter // DictionaryDecompounderTokenFilter // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L352-L404 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L352-L404 type TokenFilterDefinition any + +type TokenFilterDefinitionVariant interface { + TokenFilterDefinitionCaster() *TokenFilterDefinition +} diff --git a/typedapi/types/tokenizationconfigcontainer.go b/typedapi/types/tokenizationconfigcontainer.go index f450b73e9f..179145e566 100644 --- a/typedapi/types/tokenizationconfigcontainer.go +++ b/typedapi/types/tokenizationconfigcontainer.go @@ -16,25 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // TokenizationConfigContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L110-L129 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L110-L131 type TokenizationConfigContainer struct { + AdditionalTokenizationConfigContainerProperty map[string]json.RawMessage `json:"-"` // Bert Indicates BERT tokenization and its options Bert *NlpBertTokenizationConfig `json:"bert,omitempty"` + // BertJa Indicates BERT Japanese tokenization and its options + BertJa *NlpBertTokenizationConfig `json:"bert_ja,omitempty"` // Mpnet Indicates MPNET tokenization and its options Mpnet *NlpBertTokenizationConfig `json:"mpnet,omitempty"` // Roberta Indicates RoBERTa tokenization and its options Roberta *NlpRobertaTokenizationConfig `json:"roberta,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s TokenizationConfigContainer) MarshalJSON() ([]byte, error) { + type opt TokenizationConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTokenizationConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTokenizationConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewTokenizationConfigContainer returns a TokenizationConfigContainer. 
func NewTokenizationConfigContainer() *TokenizationConfigContainer { - r := &TokenizationConfigContainer{} + r := &TokenizationConfigContainer{ + AdditionalTokenizationConfigContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TokenizationConfigContainerVariant interface { + TokenizationConfigContainerCaster() *TokenizationConfigContainer +} + +func (s *TokenizationConfigContainer) TokenizationConfigContainerCaster() *TokenizationConfigContainer { + return s +} diff --git a/typedapi/types/tokenizer.go b/typedapi/types/tokenizer.go index 11b4976ee9..d22e322783 100644 --- a/typedapi/types/tokenizer.go +++ b/typedapi/types/tokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // string // TokenizerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L140-L145 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L140-L145 type Tokenizer any + +type TokenizerVariant interface { + TokenizerCaster() *Tokenizer +} diff --git a/typedapi/types/tokenizerdefinition.go b/typedapi/types/tokenizerdefinition.go index d6596fda2b..c45163150f 100644 --- a/typedapi/types/tokenizerdefinition.go +++ b/typedapi/types/tokenizerdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -41,5 +41,9 @@ package types // KuromojiTokenizer // NoriTokenizer // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L147-L170 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L147-L170 type TokenizerDefinition any + +type TokenizerDefinitionVariant interface { + TokenizerDefinitionCaster() *TokenizerDefinition +} diff --git a/typedapi/types/tokenpruningconfig.go b/typedapi/types/tokenpruningconfig.go index d3475d3ddd..39b78c41e2 100644 --- a/typedapi/types/tokenpruningconfig.go +++ b/typedapi/types/tokenpruningconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TokenPruningConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/TokenPruningConfig.ts#L22-L35 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/TokenPruningConfig.ts#L22-L35 type TokenPruningConfig struct { // OnlyScorePrunedTokens Whether to only score pruned tokens, vs only scoring kept tokens. 
OnlyScorePrunedTokens *bool `json:"only_score_pruned_tokens,omitempty"` @@ -116,3 +116,13 @@ func NewTokenPruningConfig() *TokenPruningConfig { return r } + +// true + +type TokenPruningConfigVariant interface { + TokenPruningConfigCaster() *TokenPruningConfig +} + +func (s *TokenPruningConfig) TokenPruningConfigCaster() *TokenPruningConfig { + return s +} diff --git a/typedapi/types/toolcall.go b/typedapi/types/toolcall.go new file mode 100644 index 0000000000..7984f2eca6 --- /dev/null +++ b/typedapi/types/toolcall.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCall type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L122-L138 +type ToolCall struct { + // Function The function that the model called. + Function ToolCallFunction `json:"function"` + // Id The identifier of the tool call. 
+ Id string `json:"id"` + // Type The type of the tool call. + Type string `json:"type"` +} + +func (s *ToolCall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewToolCall returns a ToolCall. +func NewToolCall() *ToolCall { + r := &ToolCall{} + + return r +} + +// true + +type ToolCallVariant interface { + ToolCallCaster() *ToolCall +} + +func (s *ToolCall) ToolCallCaster() *ToolCall { + return s +} diff --git a/typedapi/types/toolcallfunction.go b/typedapi/types/toolcallfunction.go new file mode 100644 index 0000000000..928cb8ff96 --- /dev/null +++ b/typedapi/types/toolcallfunction.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCallFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/chat_completion_unified/UnifiedRequest.ts#L108-L120 +type ToolCallFunction struct { + // Arguments The arguments to call the function with in JSON format. + Arguments string `json:"arguments"` + // Name The name of the function to call. + Name string `json:"name"` +} + +func (s *ToolCallFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "arguments": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Arguments", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arguments = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewToolCallFunction returns a ToolCallFunction. 
+func NewToolCallFunction() *ToolCallFunction { + r := &ToolCallFunction{} + + return r +} + +// true + +type ToolCallFunctionVariant interface { + ToolCallFunctionCaster() *ToolCallFunction +} + +func (s *ToolCallFunction) ToolCallFunctionCaster() *ToolCallFunction { + return s +} diff --git a/typedapi/types/topclassentry.go b/typedapi/types/topclassentry.go index 7e79193159..a0ad79e232 100644 --- a/typedapi/types/topclassentry.go +++ b/typedapi/types/topclassentry.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TopClassEntry type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L440-L444 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L428-L432 type TopClassEntry struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -108,3 +108,5 @@ func NewTopClassEntry() *TopClassEntry { return r } + +// false diff --git a/typedapi/types/tophit.go b/typedapi/types/tophit.go index 2568b0a5cc..6f91633b4b 100644 --- a/typedapi/types/tophit.go +++ b/typedapi/types/tophit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TopHit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/text_structure/find_structure/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/text_structure/_types/Structure.ts#L35-L38 type TopHit struct { Count int64 `json:"count"` Value json.RawMessage `json:"value,omitempty"` @@ -83,3 +83,5 @@ func NewTopHit() *TopHit { return r } + +// false diff --git a/typedapi/types/tophitsaggregate.go b/typedapi/types/tophitsaggregate.go index ea88782f5d..2d0a81c04b 100644 --- a/typedapi/types/tophitsaggregate.go +++ b/typedapi/types/tophitsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TopHitsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L747-L753 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L747-L753 type TopHitsAggregate struct { Hits HitsMetadata `json:"hits"` Meta Metadata `json:"meta,omitempty"` @@ -72,3 +72,5 @@ func NewTopHitsAggregate() *TopHitsAggregate { return r } + +// false diff --git a/typedapi/types/tophitsaggregation.go b/typedapi/types/tophitsaggregation.go index 6961e6c7f7..ded52f28ea 100644 --- a/typedapi/types/tophitsaggregation.go +++ b/typedapi/types/tophitsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TopHitsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L346-L406 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L346-L406 type TopHitsAggregation struct { // DocvalueFields Fields for which to return doc values. DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` @@ -291,8 +291,18 @@ func (s *TopHitsAggregation) UnmarshalJSON(data []byte) error { // NewTopHitsAggregation returns a TopHitsAggregation. func NewTopHitsAggregation() *TopHitsAggregation { r := &TopHitsAggregation{ - ScriptFields: make(map[string]ScriptField, 0), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type TopHitsAggregationVariant interface { + TopHitsAggregationCaster() *TopHitsAggregation +} + +func (s *TopHitsAggregation) TopHitsAggregationCaster() *TopHitsAggregation { + return s +} diff --git a/typedapi/types/topleftbottomrightgeobounds.go b/typedapi/types/topleftbottomrightgeobounds.go index 06de81e7d8..7a55f74c91 100644 --- a/typedapi/types/topleftbottomrightgeobounds.go +++ b/typedapi/types/topleftbottomrightgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TopLeftBottomRightGeoBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L161-L164 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L161-L164 type TopLeftBottomRightGeoBounds struct { BottomRight GeoLocation `json:"bottom_right"` TopLeft GeoLocation `json:"top_left"` @@ -152,3 +152,13 @@ func NewTopLeftBottomRightGeoBounds() *TopLeftBottomRightGeoBounds { return r } + +// true + +type TopLeftBottomRightGeoBoundsVariant interface { + TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds +} + +func (s *TopLeftBottomRightGeoBounds) TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds { + return s +} diff --git a/typedapi/types/topmetrics.go b/typedapi/types/topmetrics.go index 20bddbd642..e622db9059 100644 --- a/typedapi/types/topmetrics.go +++ b/typedapi/types/topmetrics.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TopMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L832-L836 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L832-L836 type TopMetrics struct { Metrics map[string]FieldValue `json:"metrics"` Sort []FieldValue `json:"sort"` @@ -31,8 +31,10 @@ type TopMetrics struct { // NewTopMetrics returns a TopMetrics. 
func NewTopMetrics() *TopMetrics { r := &TopMetrics{ - Metrics: make(map[string]FieldValue, 0), + Metrics: make(map[string]FieldValue), } return r } + +// false diff --git a/typedapi/types/topmetricsaggregate.go b/typedapi/types/topmetricsaggregate.go index e126478562..591c67f25f 100644 --- a/typedapi/types/topmetricsaggregate.go +++ b/typedapi/types/topmetricsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TopMetricsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L827-L830 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L827-L830 type TopMetricsAggregate struct { Meta Metadata `json:"meta,omitempty"` Top []TopMetrics `json:"top"` @@ -72,3 +72,5 @@ func NewTopMetricsAggregate() *TopMetricsAggregate { return r } + +// false diff --git a/typedapi/types/topmetricsaggregation.go b/typedapi/types/topmetricsaggregation.go index f4255fa5d9..d0ab2bc46a 100644 --- a/typedapi/types/topmetricsaggregation.go +++ b/typedapi/types/topmetricsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TopMetricsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L408-L425 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L408-L425 type TopMetricsAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewTopMetricsAggregation() *TopMetricsAggregation { return r } + +// true + +type TopMetricsAggregationVariant interface { + TopMetricsAggregationCaster() *TopMetricsAggregation +} + +func (s *TopMetricsAggregation) TopMetricsAggregationCaster() *TopMetricsAggregation { + return s +} diff --git a/typedapi/types/topmetricsvalue.go b/typedapi/types/topmetricsvalue.go index 9ae146e32d..92349e96cc 100644 --- a/typedapi/types/topmetricsvalue.go +++ b/typedapi/types/topmetricsvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TopMetricsValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L427-L432 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L427-L432 type TopMetricsValue struct { // Field A field to return as a metric. 
Field string `json:"field"` @@ -67,3 +67,13 @@ func NewTopMetricsValue() *TopMetricsValue { return r } + +// true + +type TopMetricsValueVariant interface { + TopMetricsValueCaster() *TopMetricsValue +} + +func (s *TopMetricsValue) TopMetricsValueCaster() *TopMetricsValue { + return s +} diff --git a/typedapi/types/toprightbottomleftgeobounds.go b/typedapi/types/toprightbottomleftgeobounds.go index 1a06070c17..12f46ee851 100644 --- a/typedapi/types/toprightbottomleftgeobounds.go +++ b/typedapi/types/toprightbottomleftgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TopRightBottomLeftGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L166-L169 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L166-L169 type TopRightBottomLeftGeoBounds struct { BottomLeft GeoLocation `json:"bottom_left"` TopRight GeoLocation `json:"top_right"` @@ -152,3 +152,13 @@ func NewTopRightBottomLeftGeoBounds() *TopRightBottomLeftGeoBounds { return r } + +// true + +type TopRightBottomLeftGeoBoundsVariant interface { + TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds +} + +func (s *TopRightBottomLeftGeoBounds) TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds { + return s +} diff --git a/typedapi/types/totalfeatureimportance.go b/typedapi/types/totalfeatureimportance.go index 54df49136a..f4b060c14d 100644 --- a/typedapi/types/totalfeatureimportance.go +++ b/typedapi/types/totalfeatureimportance.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TotalFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L232-L239 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L289-L296 type TotalFeatureImportance struct { // Classes If the trained model is a classification model, feature importance statistics // are gathered per target class value. @@ -83,3 +83,5 @@ func NewTotalFeatureImportance() *TotalFeatureImportance { return r } + +// false diff --git a/typedapi/types/totalfeatureimportanceclass.go b/typedapi/types/totalfeatureimportanceclass.go index de17c4da37..0f3eaa4346 100644 --- a/typedapi/types/totalfeatureimportanceclass.go +++ b/typedapi/types/totalfeatureimportanceclass.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TotalFeatureImportanceClass type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L241-L246 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L298-L303 type TotalFeatureImportanceClass struct { // ClassName The target class value. 
Could be a string, boolean, or number. ClassName string `json:"class_name"` @@ -75,3 +75,5 @@ func NewTotalFeatureImportanceClass() *TotalFeatureImportanceClass { return r } + +// false diff --git a/typedapi/types/totalfeatureimportancestatistics.go b/typedapi/types/totalfeatureimportancestatistics.go index 13c4361a61..e71a727d4a 100644 --- a/typedapi/types/totalfeatureimportancestatistics.go +++ b/typedapi/types/totalfeatureimportancestatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TotalFeatureImportanceStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L248-L255 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L305-L312 type TotalFeatureImportanceStatistics struct { // Max The maximum importance value across all the training data for this feature. Max int `json:"max"` @@ -117,3 +117,5 @@ func NewTotalFeatureImportanceStatistics() *TotalFeatureImportanceStatistics { return r } + +// false diff --git a/typedapi/types/totaluserprofiles.go b/typedapi/types/totaluserprofiles.go index cd40ba7c83..c75adf2e6c 100644 --- a/typedapi/types/totaluserprofiles.go +++ b/typedapi/types/totaluserprofiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TotalUserProfiles type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/suggest_user_profiles/Response.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/suggest_user_profiles/Response.ts#L24-L27 type TotalUserProfiles struct { Relation string `json:"relation"` Value int64 `json:"value"` @@ -83,3 +83,5 @@ func NewTotalUserProfiles() *TotalUserProfiles { return r } + +// false diff --git a/typedapi/types/trackhits.go b/typedapi/types/trackhits.go index bc1050fa2a..b36f23d540 100644 --- a/typedapi/types/trackhits.go +++ b/typedapi/types/trackhits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,9 @@ package types // bool // int // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/search/_types/hits.ts#L144-L152 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/search/_types/hits.ts#L143-L151 type TrackHits any + +type TrackHitsVariant interface { + TrackHitsCaster() *TrackHits +} diff --git a/typedapi/types/trainedmodel.go b/typedapi/types/trainedmodel.go index f7e1974179..bf876f7661 100644 --- a/typedapi/types/trainedmodel.go +++ b/typedapi/types/trainedmodel.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TrainedModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L60-L72 type TrainedModel struct { // Ensemble The definition for an ensemble model Ensemble *Ensemble `json:"ensemble,omitempty"` @@ -42,3 +42,13 @@ func NewTrainedModel() *TrainedModel { return r } + +// true + +type TrainedModelVariant interface { + TrainedModelCaster() *TrainedModel +} + +func (s *TrainedModel) TrainedModelCaster() *TrainedModel { + return s +} diff --git a/typedapi/types/trainedmodelassignment.go b/typedapi/types/trainedmodelassignment.go index 0b6b3f26ef..2b55de5ce6 100644 --- a/typedapi/types/trainedmodelassignment.go +++ b/typedapi/types/trainedmodelassignment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,11 +33,13 @@ import ( // TrainedModelAssignment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L399-L414 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L459-L476 type TrainedModelAssignment struct { + AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // AssignmentState The overall assignment state. AssignmentState deploymentassignmentstate.DeploymentAssignmentState `json:"assignment_state"` MaxAssignedAllocations *int `json:"max_assigned_allocations,omitempty"` + Reason *string `json:"reason,omitempty"` // RoutingTable The allocation state for each node. RoutingTable map[string]TrainedModelAssignmentRoutingTable `json:"routing_table"` // StartTime The timestamp when the deployment started. @@ -60,6 +62,11 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { switch t { + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + case "assignment_state": if err := dec.Decode(&s.AssignmentState); err != nil { return fmt.Errorf("%s | %w", "AssignmentState", err) @@ -81,6 +88,18 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { s.MaxAssignedAllocations = &f } + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + case "routing_table": if s.RoutingTable == nil { s.RoutingTable = make(map[string]TrainedModelAssignmentRoutingTable, 0) @@ -107,8 +126,10 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { // NewTrainedModelAssignment returns a TrainedModelAssignment. 
func NewTrainedModelAssignment() *TrainedModelAssignment { r := &TrainedModelAssignment{ - RoutingTable: make(map[string]TrainedModelAssignmentRoutingTable, 0), + RoutingTable: make(map[string]TrainedModelAssignmentRoutingTable), } return r } + +// false diff --git a/typedapi/types/trainedmodelassignmentroutingtable.go b/typedapi/types/trainedmodelassignmentroutingtable.go index 76ba432e7f..d0902aaa14 100644 --- a/typedapi/types/trainedmodelassignmentroutingtable.go +++ b/typedapi/types/trainedmodelassignmentroutingtable.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,13 +33,13 @@ import ( // TrainedModelAssignmentRoutingTable type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L370-L388 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L430-L448 type TrainedModelAssignmentRoutingTable struct { // CurrentAllocations Current number of allocations. CurrentAllocations int `json:"current_allocations"` // Reason The reason for the current state. It is usually populated only when the // `routing_state` is `failed`. - Reason string `json:"reason"` + Reason *string `json:"reason,omitempty"` // RoutingState The current routing state. RoutingState routingstate.RoutingState `json:"routing_state"` // TargetAllocations Target number of allocations. 
@@ -87,7 +87,7 @@ func (s *TrainedModelAssignmentRoutingTable) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Reason = o + s.Reason = &o case "routing_state": if err := dec.Decode(&s.RoutingState); err != nil { @@ -121,3 +121,5 @@ func NewTrainedModelAssignmentRoutingTable() *TrainedModelAssignmentRoutingTable return r } + +// false diff --git a/typedapi/types/trainedmodelassignmenttaskparameters.go b/typedapi/types/trainedmodelassignmenttaskparameters.go index 97c10149a0..dc394589d7 100644 --- a/typedapi/types/trainedmodelassignmenttaskparameters.go +++ b/typedapi/types/trainedmodelassignmenttaskparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,19 +33,21 @@ import ( // TrainedModelAssignmentTaskParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L312-L345 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L369-L405 type TrainedModelAssignmentTaskParameters struct { // CacheSize The size of the trained model cache. - CacheSize ByteSize `json:"cache_size"` + CacheSize ByteSize `json:"cache_size,omitempty"` // DeploymentId The unique identifier for the trained model deployment. DeploymentId string `json:"deployment_id"` // ModelBytes The size of the trained model in bytes. - ModelBytes int `json:"model_bytes"` + ModelBytes ByteSize `json:"model_bytes"` // ModelId The unique identifier for the trained model. ModelId string `json:"model_id"` // NumberOfAllocations The total number of allocations this model is assigned across ML nodes. 
- NumberOfAllocations int `json:"number_of_allocations"` - Priority trainingpriority.TrainingPriority `json:"priority"` + NumberOfAllocations int `json:"number_of_allocations"` + PerAllocationMemoryBytes ByteSize `json:"per_allocation_memory_bytes"` + PerDeploymentMemoryBytes ByteSize `json:"per_deployment_memory_bytes"` + Priority trainingpriority.TrainingPriority `json:"priority"` // QueueCapacity Number of inference requests are allowed in the queue at a time. QueueCapacity int `json:"queue_capacity"` // ThreadsPerAllocation Number of threads per allocation. @@ -78,19 +80,8 @@ func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error } case "model_bytes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ModelBytes", err) - } - s.ModelBytes = value - case float64: - f := int(v) - s.ModelBytes = f + if err := dec.Decode(&s.ModelBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelBytes", err) } case "model_id": @@ -114,6 +105,16 @@ func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error s.NumberOfAllocations = f } + case "per_allocation_memory_bytes": + if err := dec.Decode(&s.PerAllocationMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerAllocationMemoryBytes", err) + } + + case "per_deployment_memory_bytes": + if err := dec.Decode(&s.PerDeploymentMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerDeploymentMemoryBytes", err) + } + case "priority": if err := dec.Decode(&s.Priority); err != nil { return fmt.Errorf("%s | %w", "Priority", err) @@ -162,3 +163,5 @@ func NewTrainedModelAssignmentTaskParameters() *TrainedModelAssignmentTaskParame return r } + +// false diff --git a/typedapi/types/trainedmodelconfig.go b/typedapi/types/trainedmodelconfig.go index 1ea6e1acc2..a84c300a45 100644 --- a/typedapi/types/trainedmodelconfig.go +++ b/typedapi/types/trainedmodelconfig.go @@ -16,7 
+16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // TrainedModelConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L164-L199 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L203-L239 type TrainedModelConfig struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` // CreateTime The time when the trained model was created. @@ -64,8 +64,9 @@ type TrainedModelConfig struct { // created by data frame analytics contain analysis_config and input objects. Metadata *TrainedModelConfigMetadata `json:"metadata,omitempty"` // ModelId Identifier for the trained model. 
- ModelId string `json:"model_id"` - ModelSizeBytes ByteSize `json:"model_size_bytes,omitempty"` + ModelId string `json:"model_id"` + ModelPackage *ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes ByteSize `json:"model_size_bytes,omitempty"` // ModelType The model type ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` @@ -223,6 +224,11 @@ func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelId", err) } + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return fmt.Errorf("%s | %w", "ModelPackage", err) + } + case "model_size_bytes": if err := dec.Decode(&s.ModelSizeBytes); err != nil { return fmt.Errorf("%s | %w", "ModelSizeBytes", err) @@ -256,8 +262,10 @@ func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { // NewTrainedModelConfig returns a TrainedModelConfig. func NewTrainedModelConfig() *TrainedModelConfig { r := &TrainedModelConfig{ - DefaultFieldMap: make(map[string]string, 0), + DefaultFieldMap: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/trainedmodelconfiginput.go b/typedapi/types/trainedmodelconfiginput.go index c3a55f8bf1..50323d865b 100644 --- a/typedapi/types/trainedmodelconfiginput.go +++ b/typedapi/types/trainedmodelconfiginput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TrainedModelConfigInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L201-L204 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L241-L244 type TrainedModelConfigInput struct { // FieldNames An array of input field names for the model. FieldNames []string `json:"field_names"` @@ -34,3 +34,5 @@ func NewTrainedModelConfigInput() *TrainedModelConfigInput { return r } + +// false diff --git a/typedapi/types/trainedmodelconfigmetadata.go b/typedapi/types/trainedmodelconfigmetadata.go index adba79a2f6..bacc5df966 100644 --- a/typedapi/types/trainedmodelconfigmetadata.go +++ b/typedapi/types/trainedmodelconfigmetadata.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TrainedModelConfigMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L206-L214 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L246-L254 type TrainedModelConfigMetadata struct { // FeatureImportanceBaseline An object that contains the baseline for feature importance values. For // regression analysis, it is a single value. For classification analysis, there @@ -42,8 +42,10 @@ type TrainedModelConfigMetadata struct { // NewTrainedModelConfigMetadata returns a TrainedModelConfigMetadata. 
func NewTrainedModelConfigMetadata() *TrainedModelConfigMetadata { r := &TrainedModelConfigMetadata{ - FeatureImportanceBaseline: make(map[string]string, 0), + FeatureImportanceBaseline: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentallocationstatus.go b/typedapi/types/trainedmodeldeploymentallocationstatus.go index 61cb219415..5b49d07b6e 100644 --- a/typedapi/types/trainedmodeldeploymentallocationstatus.go +++ b/typedapi/types/trainedmodeldeploymentallocationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // TrainedModelDeploymentAllocationStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L390-L397 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L450-L457 type TrainedModelDeploymentAllocationStatus struct { // AllocationCount The current number of nodes where the model is allocated. AllocationCount int `json:"allocation_count"` @@ -106,3 +106,5 @@ func NewTrainedModelDeploymentAllocationStatus() *TrainedModelDeploymentAllocati return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentnodesstats.go b/typedapi/types/trainedmodeldeploymentnodesstats.go index 50b206b90c..40c266c455 100644 --- a/typedapi/types/trainedmodeldeploymentnodesstats.go +++ b/typedapi/types/trainedmodeldeploymentnodesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,34 +31,42 @@ import ( // TrainedModelDeploymentNodesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L132-L162 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L156-L201 type TrainedModelDeploymentNodesStats struct { // AverageInferenceTimeMs The average time for each inference call to complete on this node. - AverageInferenceTimeMs Float64 `json:"average_inference_time_ms"` + AverageInferenceTimeMs Float64 `json:"average_inference_time_ms,omitempty"` + // AverageInferenceTimeMsExcludingCacheHits The average time for each inference call to complete on this node, excluding + // cache + AverageInferenceTimeMsExcludingCacheHits Float64 `json:"average_inference_time_ms_excluding_cache_hits,omitempty"` + AverageInferenceTimeMsLastMinute Float64 `json:"average_inference_time_ms_last_minute,omitempty"` // ErrorCount The number of errors when evaluating the trained model. - ErrorCount int `json:"error_count"` + ErrorCount *int `json:"error_count,omitempty"` + InferenceCacheHitCount *int64 `json:"inference_cache_hit_count,omitempty"` + InferenceCacheHitCountLastMinute *int64 `json:"inference_cache_hit_count_last_minute,omitempty"` // InferenceCount The total number of inference calls made against this node for this model. - InferenceCount int `json:"inference_count"` + InferenceCount *int64 `json:"inference_count,omitempty"` // LastAccess The epoch time stamp of the last inference call for the model on this node. 
- LastAccess int64 `json:"last_access"` + LastAccess *int64 `json:"last_access,omitempty"` // Node Information pertaining to the node. - Node DiscoveryNode `json:"node"` + Node DiscoveryNode `json:"node,omitempty"` // NumberOfAllocations The number of allocations assigned to this node. - NumberOfAllocations int `json:"number_of_allocations"` + NumberOfAllocations *int `json:"number_of_allocations,omitempty"` // NumberOfPendingRequests The number of inference requests queued to be processed. - NumberOfPendingRequests int `json:"number_of_pending_requests"` + NumberOfPendingRequests *int `json:"number_of_pending_requests,omitempty"` + PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"` // RejectionExecutionCount The number of inference requests that were not processed because the queue // was full. - RejectionExecutionCount int `json:"rejection_execution_count"` + RejectionExecutionCount *int `json:"rejection_execution_count,omitempty"` // RoutingState The current routing state and reason for the current routing state for this // allocation. RoutingState TrainedModelAssignmentRoutingTable `json:"routing_state"` // StartTime The epoch timestamp when the allocation started. - StartTime int64 `json:"start_time"` + StartTime *int64 `json:"start_time,omitempty"` // ThreadsPerAllocation The number of threads used by each allocation during inference. - ThreadsPerAllocation int `json:"threads_per_allocation"` + ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"` + ThroughputLastMinute int `json:"throughput_last_minute"` // TimeoutCount The number of inference requests that timed out before being processed. 
- TimeoutCount int `json:"timeout_count"` + TimeoutCount *int `json:"timeout_count,omitempty"` } func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { @@ -81,6 +89,16 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "AverageInferenceTimeMs", err) } + case "average_inference_time_ms_excluding_cache_hits": + if err := dec.Decode(&s.AverageInferenceTimeMsExcludingCacheHits); err != nil { + return fmt.Errorf("%s | %w", "AverageInferenceTimeMsExcludingCacheHits", err) + } + + case "average_inference_time_ms_last_minute": + if err := dec.Decode(&s.AverageInferenceTimeMsLastMinute); err != nil { + return fmt.Errorf("%s | %w", "AverageInferenceTimeMsLastMinute", err) + } + case "error_count": var tmp any @@ -91,41 +109,60 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ErrorCount", err) } - s.ErrorCount = value + s.ErrorCount = &value case float64: f := int(v) - s.ErrorCount = f + s.ErrorCount = &f } - case "inference_count": + case "inference_cache_hit_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InferenceCacheHitCount", err) + } + s.InferenceCacheHitCount = &value + case float64: + f := int64(v) + s.InferenceCacheHitCount = &f + } + case "inference_cache_hit_count_last_minute": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.Atoi(v) + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "InferenceCount", err) + return fmt.Errorf("%s | %w", "InferenceCacheHitCountLastMinute", err) } - s.InferenceCount = value + s.InferenceCacheHitCountLastMinute = &value case float64: - f := int(v) - s.InferenceCount = f + f := int64(v) + s.InferenceCacheHitCountLastMinute = &f } - case "last_access": + case 
"inference_count": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "LastAccess", err) + return fmt.Errorf("%s | %w", "InferenceCount", err) } - s.LastAccess = value + s.InferenceCount = &value case float64: f := int64(v) - s.LastAccess = f + s.InferenceCount = &f + } + + case "last_access": + if err := dec.Decode(&s.LastAccess); err != nil { + return fmt.Errorf("%s | %w", "LastAccess", err) } case "node": @@ -143,10 +180,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfAllocations", err) } - s.NumberOfAllocations = value + s.NumberOfAllocations = &value case float64: f := int(v) - s.NumberOfAllocations = f + s.NumberOfAllocations = &f } case "number_of_pending_requests": @@ -159,10 +196,25 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfPendingRequests", err) } - s.NumberOfPendingRequests = value + s.NumberOfPendingRequests = &value case float64: f := int(v) - s.NumberOfPendingRequests = f + s.NumberOfPendingRequests = &f + } + + case "peak_throughput_per_minute": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err) + } + s.PeakThroughputPerMinute = value + case float64: + f := int64(v) + s.PeakThroughputPerMinute = f } case "rejection_execution_count": @@ -175,10 +227,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "RejectionExecutionCount", err) } - s.RejectionExecutionCount = value + s.RejectionExecutionCount = &value case float64: f := int(v) - s.RejectionExecutionCount = f + s.RejectionExecutionCount = &f } case "routing_state": @@ -201,10 +253,26 @@ func (s 
*TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err) } - s.ThreadsPerAllocation = value + s.ThreadsPerAllocation = &value case float64: f := int(v) - s.ThreadsPerAllocation = f + s.ThreadsPerAllocation = &f + } + + case "throughput_last_minute": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ThroughputLastMinute", err) + } + s.ThroughputLastMinute = value + case float64: + f := int(v) + s.ThroughputLastMinute = f } case "timeout_count": @@ -217,10 +285,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "TimeoutCount", err) } - s.TimeoutCount = value + s.TimeoutCount = &value case float64: f := int(v) - s.TimeoutCount = f + s.TimeoutCount = &f } } @@ -234,3 +302,5 @@ func NewTrainedModelDeploymentNodesStats() *TrainedModelDeploymentNodesStats { return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentstats.go b/typedapi/types/trainedmodeldeploymentstats.go index 2af113d1f4..0cbd180749 100644 --- a/typedapi/types/trainedmodeldeploymentstats.go +++ b/typedapi/types/trainedmodeldeploymentstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,47 +29,51 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority" ) // TrainedModelDeploymentStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L61-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L62-L107 type TrainedModelDeploymentStats struct { + AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // AllocationStatus The detailed allocation status for the deployment. - AllocationStatus TrainedModelDeploymentAllocationStatus `json:"allocation_status"` - CacheSize ByteSize `json:"cache_size,omitempty"` + AllocationStatus *TrainedModelDeploymentAllocationStatus `json:"allocation_status,omitempty"` + CacheSize ByteSize `json:"cache_size,omitempty"` // DeploymentId The unique identifier for the trained model deployment. DeploymentId string `json:"deployment_id"` // ErrorCount The sum of `error_count` for all nodes in the deployment. - ErrorCount int `json:"error_count"` + ErrorCount *int `json:"error_count,omitempty"` // InferenceCount The sum of `inference_count` for all nodes in the deployment. - InferenceCount int `json:"inference_count"` + InferenceCount *int `json:"inference_count,omitempty"` // ModelId The unique identifier for the trained model. ModelId string `json:"model_id"` // Nodes The deployment stats for each node that currently has the model allocated. // In serverless, stats are reported for a single unnamed virtual node. Nodes []TrainedModelDeploymentNodesStats `json:"nodes"` // NumberOfAllocations The number of allocations requested. - NumberOfAllocations int `json:"number_of_allocations"` + NumberOfAllocations *int `json:"number_of_allocations,omitempty"` + PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"` + Priority trainingpriority.TrainingPriority `json:"priority"` // QueueCapacity The number of inference requests that can be queued before new requests are // rejected. 
- QueueCapacity int `json:"queue_capacity"` + QueueCapacity *int `json:"queue_capacity,omitempty"` // Reason The reason for the current deployment state. Usually only populated when // the model is not deployed to a node. - Reason string `json:"reason"` + Reason *string `json:"reason,omitempty"` // RejectedExecutionCount The sum of `rejected_execution_count` for all nodes in the deployment. // Individual nodes reject an inference request if the inference queue is full. // The queue size is controlled by the `queue_capacity` setting in the start // trained model deployment API. - RejectedExecutionCount int `json:"rejected_execution_count"` + RejectedExecutionCount *int `json:"rejected_execution_count,omitempty"` // StartTime The epoch timestamp when the deployment started. StartTime int64 `json:"start_time"` // State The overall state of the deployment. - State deploymentassignmentstate.DeploymentAssignmentState `json:"state"` + State *deploymentassignmentstate.DeploymentAssignmentState `json:"state,omitempty"` // ThreadsPerAllocation The number of threads used be each allocation during inference. - ThreadsPerAllocation int `json:"threads_per_allocation"` + ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"` // TimeoutCount The sum of `timeout_count` for all nodes in the deployment. 
- TimeoutCount int `json:"timeout_count"` + TimeoutCount *int `json:"timeout_count,omitempty"` } func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { @@ -87,6 +91,11 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { switch t { + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + case "allocation_status": if err := dec.Decode(&s.AllocationStatus); err != nil { return fmt.Errorf("%s | %w", "AllocationStatus", err) @@ -112,10 +121,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ErrorCount", err) } - s.ErrorCount = value + s.ErrorCount = &value case float64: f := int(v) - s.ErrorCount = f + s.ErrorCount = &f } case "inference_count": @@ -128,10 +137,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "InferenceCount", err) } - s.InferenceCount = value + s.InferenceCount = &value case float64: f := int(v) - s.InferenceCount = f + s.InferenceCount = &f } case "model_id": @@ -154,10 +163,30 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfAllocations", err) } - s.NumberOfAllocations = value + s.NumberOfAllocations = &value case float64: f := int(v) - s.NumberOfAllocations = f + s.NumberOfAllocations = &f + } + + case "peak_throughput_per_minute": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err) + } + s.PeakThroughputPerMinute = value + case float64: + f := int64(v) + s.PeakThroughputPerMinute = f + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) } case "queue_capacity": @@ 
-170,10 +199,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "QueueCapacity", err) } - s.QueueCapacity = value + s.QueueCapacity = &value case float64: f := int(v) - s.QueueCapacity = f + s.QueueCapacity = &f } case "reason": @@ -186,7 +215,7 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Reason = o + s.Reason = &o case "rejected_execution_count": @@ -198,10 +227,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "RejectedExecutionCount", err) } - s.RejectedExecutionCount = value + s.RejectedExecutionCount = &value case float64: f := int(v) - s.RejectedExecutionCount = f + s.RejectedExecutionCount = &f } case "start_time": @@ -224,10 +253,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err) } - s.ThreadsPerAllocation = value + s.ThreadsPerAllocation = &value case float64: f := int(v) - s.ThreadsPerAllocation = f + s.ThreadsPerAllocation = &f } case "timeout_count": @@ -240,10 +269,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "TimeoutCount", err) } - s.TimeoutCount = value + s.TimeoutCount = &value case float64: f := int(v) - s.TimeoutCount = f + s.TimeoutCount = &f } } @@ -257,3 +286,5 @@ func NewTrainedModelDeploymentStats() *TrainedModelDeploymentStats { return r } + +// false diff --git a/typedapi/types/trainedmodelentities.go b/typedapi/types/trainedmodelentities.go index ab614fa924..127319340d 100644 --- a/typedapi/types/trainedmodelentities.go +++ b/typedapi/types/trainedmodelentities.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelEntities type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L433-L439 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L421-L427 type TrainedModelEntities struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -138,3 +138,5 @@ func NewTrainedModelEntities() *TrainedModelEntities { return r } + +// false diff --git a/typedapi/types/trainedmodelinferenceclassimportance.go b/typedapi/types/trainedmodelinferenceclassimportance.go index 6fbc194307..9ad9458033 100644 --- a/typedapi/types/trainedmodelinferenceclassimportance.go +++ b/typedapi/types/trainedmodelinferenceclassimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L446-L449 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L434-L437 type TrainedModelInferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` @@ -91,3 +91,5 @@ func NewTrainedModelInferenceClassImportance() *TrainedModelInferenceClassImport return r } + +// false diff --git a/typedapi/types/trainedmodelinferencefeatureimportance.go b/typedapi/types/trainedmodelinferencefeatureimportance.go index 9f38f9cb13..8a80c8ff75 100644 --- a/typedapi/types/trainedmodelinferencefeatureimportance.go +++ b/typedapi/types/trainedmodelinferencefeatureimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceFeatureImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L451-L455 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L439-L443 type TrainedModelInferenceFeatureImportance struct { Classes []TrainedModelInferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` @@ -97,3 +97,5 @@ func NewTrainedModelInferenceFeatureImportance() *TrainedModelInferenceFeatureIm return r } + +// false diff --git a/typedapi/types/trainedmodelinferencestats.go b/typedapi/types/trainedmodelinferencestats.go index 78dff29868..96242eb9e8 100644 --- a/typedapi/types/trainedmodelinferencestats.go +++ b/typedapi/types/trainedmodelinferencestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L103-L123 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L127-L147 type TrainedModelInferenceStats struct { // CacheMissCount The number of times the model was loaded for inference and was not retrieved // from the cache. 
@@ -147,3 +147,5 @@ func NewTrainedModelInferenceStats() *TrainedModelInferenceStats { return r } + +// false diff --git a/typedapi/types/trainedmodellocation.go b/typedapi/types/trainedmodellocation.go index 85047138bb..3729ee942a 100644 --- a/typedapi/types/trainedmodellocation.go +++ b/typedapi/types/trainedmodellocation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TrainedModelLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L416-L418 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L478-L480 type TrainedModelLocation struct { Index TrainedModelLocationIndex `json:"index"` } @@ -33,3 +33,5 @@ func NewTrainedModelLocation() *TrainedModelLocation { return r } + +// false diff --git a/typedapi/types/trainedmodellocationindex.go b/typedapi/types/trainedmodellocationindex.go index 4ebe5c00c2..ef73073afd 100644 --- a/typedapi/types/trainedmodellocationindex.go +++ b/typedapi/types/trainedmodellocationindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TrainedModelLocationIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L420-L422 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L482-L484 type TrainedModelLocationIndex struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewTrainedModelLocationIndex() *TrainedModelLocationIndex { return r } + +// false diff --git a/typedapi/types/trainedmodelprefixstrings.go b/typedapi/types/trainedmodelprefixstrings.go index 67b50bf903..c57ba2107a 100644 --- a/typedapi/types/trainedmodelprefixstrings.go +++ b/typedapi/types/trainedmodelprefixstrings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelPrefixStrings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L424-L433 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L486-L495 type TrainedModelPrefixStrings struct { // Ingest String prepended to input at ingest Ingest *string `json:"ingest,omitempty"` @@ -89,3 +89,13 @@ func NewTrainedModelPrefixStrings() *TrainedModelPrefixStrings { return r } + +// true + +type TrainedModelPrefixStringsVariant interface { + TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings +} + +func (s *TrainedModelPrefixStrings) TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings { + return s +} diff --git a/typedapi/types/trainedmodelsizestats.go b/typedapi/types/trainedmodelsizestats.go index b9ae226a77..00bd86e1e0 100644 --- a/typedapi/types/trainedmodelsizestats.go +++ b/typedapi/types/trainedmodelsizestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TrainedModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L125-L130 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L149-L154 type TrainedModelSizeStats struct { // ModelSizeBytes The size of the model in bytes. 
ModelSizeBytes ByteSize `json:"model_size_bytes"` @@ -74,3 +74,5 @@ func NewTrainedModelSizeStats() *TrainedModelSizeStats { return r } + +// false diff --git a/typedapi/types/trainedmodelsrecord.go b/typedapi/types/trainedmodelsrecord.go index 95c702a5ae..854075499a 100644 --- a/typedapi/types/trainedmodelsrecord.go +++ b/typedapi/types/trainedmodelsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/ml_trained_models/types.ts#L23-L115 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/ml_trained_models/types.ts#L23-L115 type TrainedModelsRecord struct { // CreateTime The time the model was created. CreateTime DateTime `json:"create_time,omitempty"` @@ -286,3 +286,5 @@ func NewTrainedModelsRecord() *TrainedModelsRecord { return r } + +// false diff --git a/typedapi/types/trainedmodelstats.go b/typedapi/types/trainedmodelstats.go index 6aaf4a3cc1..79557a9498 100644 --- a/typedapi/types/trainedmodelstats.go +++ b/typedapi/types/trainedmodelstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/TrainedModel.ts#L41-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/TrainedModel.ts#L42-L60 type TrainedModelStats struct { // DeploymentStats A collection of deployment stats, which is present when the models are // deployed. @@ -117,8 +117,10 @@ func (s *TrainedModelStats) UnmarshalJSON(data []byte) error { // NewTrainedModelStats returns a TrainedModelStats. func NewTrainedModelStats() *TrainedModelStats { r := &TrainedModelStats{ - Ingest: make(map[string]json.RawMessage, 0), + Ingest: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/trainedmodeltree.go b/typedapi/types/trainedmodeltree.go index 196c1f03cb..f24059886e 100644 --- a/typedapi/types/trainedmodeltree.go +++ b/typedapi/types/trainedmodeltree.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelTree type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L74-L79 type TrainedModelTree struct { ClassificationLabels []string `json:"classification_labels,omitempty"` FeatureNames []string `json:"feature_names"` @@ -92,3 +92,13 @@ func NewTrainedModelTree() *TrainedModelTree { return r } + +// true + +type TrainedModelTreeVariant interface { + TrainedModelTreeCaster() *TrainedModelTree +} + +func (s *TrainedModelTree) TrainedModelTreeCaster() *TrainedModelTree { + return s +} diff --git a/typedapi/types/trainedmodeltreenode.go b/typedapi/types/trainedmodeltreenode.go index 829baa9863..a597275989 100644 --- a/typedapi/types/trainedmodeltreenode.go +++ b/typedapi/types/trainedmodeltreenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrainedModelTreeNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L81-L91 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L81-L91 type TrainedModelTreeNode struct { DecisionType *string `json:"decision_type,omitempty"` DefaultLeft *bool `json:"default_left,omitempty"` @@ -208,3 +208,13 @@ func NewTrainedModelTreeNode() *TrainedModelTreeNode { return r } + +// true + +type TrainedModelTreeNodeVariant interface { + TrainedModelTreeNodeCaster() *TrainedModelTreeNode +} + +func (s *TrainedModelTreeNode) TrainedModelTreeNodeCaster() *TrainedModelTreeNode { + return s +} diff --git a/typedapi/types/transformauthorization.go b/typedapi/types/transformauthorization.go index 5d1aeae541..2ca5a45bd3 100644 --- a/typedapi/types/transformauthorization.go +++ b/typedapi/types/transformauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/Authorization.ts#L59-L71 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/Authorization.ts#L59-L71 type TransformAuthorization struct { // ApiKey If an API key was used for the most recent update to the transform, its name // and identifier are listed in the response. 
@@ -92,3 +92,5 @@ func NewTransformAuthorization() *TransformAuthorization { return r } + +// false diff --git a/typedapi/types/transformcontainer.go b/typedapi/types/transformcontainer.go index 7f51859f28..f76f8f2312 100644 --- a/typedapi/types/transformcontainer.go +++ b/typedapi/types/transformcontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // TransformContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Transform.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Transform.ts#L27-L34 type TransformContainer struct { - Chain []TransformContainer `json:"chain,omitempty"` - Script *ScriptTransform `json:"script,omitempty"` - Search *SearchTransform `json:"search,omitempty"` + AdditionalTransformContainerProperty map[string]json.RawMessage `json:"-"` + Chain []TransformContainer `json:"chain,omitempty"` + Script *ScriptTransform `json:"script,omitempty"` + Search *SearchTransform `json:"search,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TransformContainer) MarshalJSON() ([]byte, error) { + type opt TransformContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range 
s.AdditionalTransformContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTransformContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTransformContainer returns a TransformContainer. func NewTransformContainer() *TransformContainer { - r := &TransformContainer{} + r := &TransformContainer{ + AdditionalTransformContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TransformContainerVariant interface { + TransformContainerCaster() *TransformContainer +} + +func (s *TransformContainer) TransformContainerCaster() *TransformContainer { + return s +} diff --git a/typedapi/types/transformdestination.go b/typedapi/types/transformdestination.go index 13e792040e..12ce974e5d 100644 --- a/typedapi/types/transformdestination.go +++ b/typedapi/types/transformdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L34-L45 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L34-L45 type TransformDestination struct { // Index The destination index for the transform. 
The mappings of the destination // index are deduced based on the source @@ -86,3 +86,13 @@ func NewTransformDestination() *TransformDestination { return r } + +// true + +type TransformDestinationVariant interface { + TransformDestinationCaster() *TransformDestination +} + +func (s *TransformDestination) TransformDestinationCaster() *TransformDestination { + return s +} diff --git a/typedapi/types/transformindexerstats.go b/typedapi/types/transformindexerstats.go index 377bee8df5..2f3259b4d3 100644 --- a/typedapi/types/transformindexerstats.go +++ b/typedapi/types/transformindexerstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformIndexerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L56-L74 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L56-L74 type TransformIndexerStats struct { DeleteTimeInMs *int64 `json:"delete_time_in_ms,omitempty"` DocumentsDeleted *int64 `json:"documents_deleted,omitempty"` @@ -285,3 +285,5 @@ func NewTransformIndexerStats() *TransformIndexerStats { return r } + +// false diff --git a/typedapi/types/transformprogress.go b/typedapi/types/transformprogress.go index e8e3bd26c7..d51f45d5c1 100644 --- a/typedapi/types/transformprogress.go +++ b/typedapi/types/transformprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L48-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L48-L54 type TransformProgress struct { DocsIndexed int64 `json:"docs_indexed"` DocsProcessed int64 `json:"docs_processed"` @@ -142,3 +142,5 @@ func NewTransformProgress() *TransformProgress { return r } + +// false diff --git a/typedapi/types/transformsource.go b/typedapi/types/transformsource.go index 5a25bf326e..534f804d93 100644 --- a/typedapi/types/transformsource.go +++ b/typedapi/types/transformsource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TransformSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/_types/Transform.ts#L146-L165 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/_types/Transform.ts#L146-L165 type TransformSource struct { // Index The source indices for the transform. 
It can be a single index, an index // pattern (for example, `"my-index-*""`), an @@ -101,3 +101,13 @@ func NewTransformSource() *TransformSource { return r } + +// true + +type TransformSourceVariant interface { + TransformSourceCaster() *TransformSource +} + +func (s *TransformSource) TransformSourceCaster() *TransformSource { + return s +} diff --git a/typedapi/types/transformsrecord.go b/typedapi/types/transformsrecord.go index cf63437a59..d5334d0c8c 100644 --- a/typedapi/types/transformsrecord.go +++ b/typedapi/types/transformsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cat/transforms/types.ts#L22-L197 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cat/transforms/types.ts#L22-L197 type TransformsRecord struct { // ChangesLastDetectionTime The timestamp when changes were last detected in the source indices. ChangesLastDetectionTime *string `json:"changes_last_detection_time,omitempty"` @@ -533,3 +533,5 @@ func NewTransformsRecord() *TransformsRecord { return r } + +// false diff --git a/typedapi/types/transformstats.go b/typedapi/types/transformstats.go index 4dd76795f3..8257064fe3 100644 --- a/typedapi/types/transformstats.go +++ b/typedapi/types/transformstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L31-L42 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L31-L42 type TransformStats struct { Checkpointing Checkpointing `json:"checkpointing"` Health *TransformStatsHealth `json:"health,omitempty"` @@ -117,3 +117,5 @@ func NewTransformStats() *TransformStats { return r } + +// false diff --git a/typedapi/types/transformstatshealth.go b/typedapi/types/transformstatshealth.go index 28b1fa54db..5aa1bfa949 100644 --- a/typedapi/types/transformstatshealth.go +++ b/typedapi/types/transformstatshealth.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // TransformStatsHealth type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform_stats/types.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform_stats/types.ts#L44-L46 type TransformStatsHealth struct { Status healthstatus.HealthStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewTransformStatsHealth() *TransformStatsHealth { return r } + +// false diff --git a/typedapi/types/transformsummary.go b/typedapi/types/transformsummary.go index 54506ac8ac..91bed3aa4e 100644 --- a/typedapi/types/transformsummary.go +++ b/typedapi/types/transformsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransformSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/transform/get_transform/types.ts#L33-L61 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/transform/get_transform/types.ts#L33-L61 type TransformSummary struct { // Authorization The security privileges that the transform uses to run its queries. If // Elastic Stack security features were disabled at the time of the most recent @@ -164,3 +164,5 @@ func NewTransformSummary() *TransformSummary { return r } + +// false diff --git a/typedapi/types/translog.go b/typedapi/types/translog.go index b891ae61b2..375c78cc2a 100644 --- a/typedapi/types/translog.go +++ b/typedapi/types/translog.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // Translog type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L341-L363 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L355-L377 type Translog struct { // Durability Whether or not to `fsync` and commit the translog after every index, delete, // update, or bulk request. @@ -101,3 +101,13 @@ func NewTranslog() *Translog { return r } + +// true + +type TranslogVariant interface { + TranslogCaster() *Translog +} + +func (s *Translog) TranslogCaster() *Translog { + return s +} diff --git a/typedapi/types/translogretention.go b/typedapi/types/translogretention.go index bdb7121bc3..504df959c2 100644 --- a/typedapi/types/translogretention.go +++ b/typedapi/types/translogretention.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TranslogRetention type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/_types/IndexSettings.ts#L382-L401 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/_types/IndexSettings.ts#L396-L415 type TranslogRetention struct { // Age This controls the maximum duration for which translog files are kept by each // shard. 
Keeping more @@ -90,3 +90,13 @@ func NewTranslogRetention() *TranslogRetention { return r } + +// true + +type TranslogRetentionVariant interface { + TranslogRetentionCaster() *TranslogRetention +} + +func (s *TranslogRetention) TranslogRetentionCaster() *TranslogRetention { + return s +} diff --git a/typedapi/types/translogstats.go b/typedapi/types/translogstats.go index 866bb7c0c0..bec60f0ed4 100644 --- a/typedapi/types/translogstats.go +++ b/typedapi/types/translogstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TranslogStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L397-L405 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L400-L408 type TranslogStats struct { EarliestLastModifiedAge int64 `json:"earliest_last_modified_age"` Operations int64 `json:"operations"` @@ -168,3 +168,5 @@ func NewTranslogStats() *TranslogStats { return r } + +// false diff --git a/typedapi/types/translogstatus.go b/typedapi/types/translogstatus.go index 5603871a09..4fd3b22bea 100644 --- a/typedapi/types/translogstatus.go +++ b/typedapi/types/translogstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TranslogStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L102-L109 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L102-L109 type TranslogStatus struct { Percent Percentage `json:"percent"` Recovered int64 `json:"recovered"` @@ -127,3 +127,5 @@ func NewTranslogStatus() *TranslogStatus { return r } + +// false diff --git a/typedapi/types/transport.go b/typedapi/types/transport.go index 250fa10703..ab5320bb85 100644 --- a/typedapi/types/transport.go +++ b/typedapi/types/transport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Transport type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1118-L1161 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1118-L1161 type Transport struct { // InboundHandlingTimeHistogram The distribution of the time spent handling each inbound message on a // transport thread, represented as a histogram. @@ -219,3 +219,5 @@ func NewTransport() *Transport { return r } + +// false diff --git a/typedapi/types/transporthistogram.go b/typedapi/types/transporthistogram.go index f5663b2f0e..959f22e4fe 100644 --- a/typedapi/types/transporthistogram.go +++ b/typedapi/types/transporthistogram.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TransportHistogram type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/nodes/_types/Stats.ts#L1163-L1177 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/nodes/_types/Stats.ts#L1163-L1177 type TransportHistogram struct { // Count The number of times a transport thread took a period of time within the // bounds of this bucket to handle an inbound message. @@ -115,3 +115,5 @@ func NewTransportHistogram() *TransportHistogram { return r } + +// false diff --git a/typedapi/types/triggercontainer.go b/typedapi/types/triggercontainer.go index 8a306effc9..485b1f0807 100644 --- a/typedapi/types/triggercontainer.go +++ b/typedapi/types/triggercontainer.go @@ -16,20 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // TriggerContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Trigger.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Trigger.ts#L23-L28 type TriggerContainer struct { - Schedule *ScheduleContainer `json:"schedule,omitempty"` + AdditionalTriggerContainerProperty map[string]json.RawMessage `json:"-"` + Schedule *ScheduleContainer `json:"schedule,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TriggerContainer) MarshalJSON() ([]byte, error) { + type opt TriggerContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTriggerContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTriggerContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTriggerContainer returns a TriggerContainer. func NewTriggerContainer() *TriggerContainer { - r := &TriggerContainer{} + r := &TriggerContainer{ + AdditionalTriggerContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TriggerContainerVariant interface { + TriggerContainerCaster() *TriggerContainer +} + +func (s *TriggerContainer) TriggerContainerCaster() *TriggerContainer { + return s +} diff --git a/typedapi/types/triggereventcontainer.go b/typedapi/types/triggereventcontainer.go index ba3308f9f3..d8284f5e9c 100644 --- a/typedapi/types/triggereventcontainer.go +++ b/typedapi/types/triggereventcontainer.go @@ -16,20 +16,59 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types +import ( + "encoding/json" + "fmt" +) + // TriggerEventContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Trigger.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Trigger.ts#L32-L37 type TriggerEventContainer struct { - Schedule *ScheduleTriggerEvent `json:"schedule,omitempty"` + AdditionalTriggerEventContainerProperty map[string]json.RawMessage `json:"-"` + Schedule *ScheduleTriggerEvent `json:"schedule,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TriggerEventContainer) MarshalJSON() ([]byte, error) { + type opt TriggerEventContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTriggerEventContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTriggerEventContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTriggerEventContainer returns a TriggerEventContainer. 
func NewTriggerEventContainer() *TriggerEventContainer { - r := &TriggerEventContainer{} + r := &TriggerEventContainer{ + AdditionalTriggerEventContainerProperty: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/triggereventresult.go b/typedapi/types/triggereventresult.go index f0f88fce25..b0a707a6fd 100644 --- a/typedapi/types/triggereventresult.go +++ b/typedapi/types/triggereventresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TriggerEventResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Trigger.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Trigger.ts#L39-L43 type TriggerEventResult struct { Manual TriggerEventContainer `json:"manual"` TriggeredTime DateTime `json:"triggered_time"` @@ -86,3 +86,5 @@ func NewTriggerEventResult() *TriggerEventResult { return r } + +// false diff --git a/typedapi/types/trimprocessor.go b/typedapi/types/trimprocessor.go index df882516ec..2ac03c1854 100644 --- a/typedapi/types/trimprocessor.go +++ b/typedapi/types/trimprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TrimProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1485-L1501 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1526-L1542 type TrimProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type TrimProcessor struct { // Field The string-valued field to trim whitespace from. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -88,16 +88,9 @@ func (s *TrimProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewTrimProcessor() *TrimProcessor { return r } + +// true + +type TrimProcessorVariant interface { + TrimProcessorCaster() *TrimProcessor +} + +func (s *TrimProcessor) TrimProcessorCaster() *TrimProcessor { + return s +} diff --git a/typedapi/types/trimtokenfilter.go b/typedapi/types/trimtokenfilter.go index 67adac70a8..abbdcf5238 100644 --- a/typedapi/types/trimtokenfilter.go +++ b/typedapi/types/trimtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // TrimTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L327-L329 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L327-L329 type TrimTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewTrimTokenFilter() *TrimTokenFilter { return r } + +// true + +type TrimTokenFilterVariant interface { + TrimTokenFilterCaster() *TrimTokenFilter +} + +func (s *TrimTokenFilter) TrimTokenFilterCaster() *TrimTokenFilter { + return s +} diff --git a/typedapi/types/truncatetokenfilter.go b/typedapi/types/truncatetokenfilter.go index 5d18701a18..060e9e399b 100644 --- a/typedapi/types/truncatetokenfilter.go +++ b/typedapi/types/truncatetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TruncateTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L331-L334 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L331-L334 type TruncateTokenFilter struct { Length *int `json:"length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewTruncateTokenFilter() *TruncateTokenFilter { return r } + +// true + +type TruncateTokenFilterVariant interface { + TruncateTokenFilterCaster() *TruncateTokenFilter +} + +func (s *TruncateTokenFilter) TruncateTokenFilterCaster() *TruncateTokenFilter { + return s +} diff --git a/typedapi/types/ttestaggregate.go b/typedapi/types/ttestaggregate.go index eca41887cf..21d033e5a0 100644 --- a/typedapi/types/ttestaggregate.go +++ b/typedapi/types/ttestaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TTestAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L838-L845 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L838-L845 type TTestAggregate struct { Meta Metadata `json:"meta,omitempty"` Value *Float64 `json:"value,omitempty"` @@ -86,3 +86,5 @@ func NewTTestAggregate() *TTestAggregate { return r } + +// false diff --git a/typedapi/types/ttestaggregation.go b/typedapi/types/ttestaggregation.go index 815da80ca1..3739df844a 100644 --- a/typedapi/types/ttestaggregation.go +++ b/typedapi/types/ttestaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -26,7 +26,7 @@ import ( // TTestAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L303-L317 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L303-L317 type TTestAggregation struct { // A Test population A. 
A *TestPopulation `json:"a,omitempty"` @@ -42,3 +42,13 @@ func NewTTestAggregation() *TTestAggregation { return r } + +// true + +type TTestAggregationVariant interface { + TTestAggregationCaster() *TTestAggregation +} + +func (s *TTestAggregation) TTestAggregationCaster() *TTestAggregation { + return s +} diff --git a/typedapi/types/turkishanalyzer.go b/typedapi/types/turkishanalyzer.go index 2658387db8..9bd606a47a 100644 --- a/typedapi/types/turkishanalyzer.go +++ b/typedapi/types/turkishanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TurkishAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L299-L304 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L310-L315 type TurkishAnalyzer struct { StemExclusion []string `json:"stem_exclusion,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -118,3 +118,13 @@ func NewTurkishAnalyzer() *TurkishAnalyzer { return r } + +// true + +type TurkishAnalyzerVariant interface { + TurkishAnalyzerCaster() *TurkishAnalyzer +} + +func (s *TurkishAnalyzer) TurkishAnalyzerCaster() *TurkishAnalyzer { + return s +} diff --git a/typedapi/types/typefieldmappings.go b/typedapi/types/typefieldmappings.go index bf0634f186..48c1d0814b 100644 --- a/typedapi/types/typefieldmappings.go +++ b/typedapi/types/typefieldmappings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // TypeFieldMappings type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/get_field_mapping/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/get_field_mapping/types.ts#L24-L26 type TypeFieldMappings struct { Mappings map[string]FieldMapping `json:"mappings"` } @@ -30,8 +30,10 @@ type TypeFieldMappings struct { // NewTypeFieldMappings returns a TypeFieldMappings. func NewTypeFieldMappings() *TypeFieldMappings { r := &TypeFieldMappings{ - Mappings: make(map[string]FieldMapping, 0), + Mappings: make(map[string]FieldMapping), } return r } + +// false diff --git a/typedapi/types/typemapping.go b/typedapi/types/typemapping.go index b1b3d8c0ee..53023e21c0 100644 --- a/typedapi/types/typemapping.go +++ b/typedapi/types/typemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" ) // TypeMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/TypeMapping.ts#L34-L57 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/TypeMapping.ts#L34-L57 type TypeMapping struct { AllField *AllField `json:"all_field,omitempty"` DataStreamTimestamp_ *DataStreamTimestamp `json:"_data_stream_timestamp,omitempty"` @@ -51,7 +52,7 @@ type TypeMapping struct { Runtime map[string]RuntimeField `json:"runtime,omitempty"` Size_ *SizeField `json:"_size,omitempty"` Source_ *SourceField `json:"_source,omitempty"` - Subobjects *bool `json:"subobjects,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` } func (s *TypeMapping) UnmarshalJSON(data []byte) error { @@ -170,301 +171,313 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := 
NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -494,17 +507,8 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case "subobjects": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Subobjects", err) - } - s.Subobjects = &value - case bool: - s.Subobjects = &v + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) } } @@ -515,9 +519,19 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { // NewTypeMapping returns a TypeMapping. func NewTypeMapping() *TypeMapping { r := &TypeMapping{ - Properties: make(map[string]Property, 0), - Runtime: make(map[string]RuntimeField, 0), + Properties: make(map[string]Property), + Runtime: make(map[string]RuntimeField), } return r } + +// true + +type TypeMappingVariant interface { + TypeMappingCaster() *TypeMapping +} + +func (s *TypeMapping) TypeMappingCaster() *TypeMapping { + return s +} diff --git a/typedapi/types/typequery.go b/typedapi/types/typequery.go index 326ff6ae24..e9bb3e7ec2 100644 --- a/typedapi/types/typequery.go +++ b/typedapi/types/typequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // TypeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L301-L303 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L301-L303 type TypeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -109,3 +109,13 @@ func NewTypeQuery() *TypeQuery { return r } + +// true + +type TypeQueryVariant interface { + TypeQueryCaster() *TypeQuery +} + +func (s *TypeQuery) TypeQueryCaster() *TypeQuery { + return s +} diff --git a/typedapi/types/uaxemailurltokenizer.go b/typedapi/types/uaxemailurltokenizer.go index a590cef0cf..42439ebff9 100644 --- a/typedapi/types/uaxemailurltokenizer.go +++ b/typedapi/types/uaxemailurltokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UaxEmailUrlTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L130-L133 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L130-L133 type UaxEmailUrlTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewUaxEmailUrlTokenizer() *UaxEmailUrlTokenizer { return r } + +// true + +type UaxEmailUrlTokenizerVariant interface { + UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer +} + +func (s *UaxEmailUrlTokenizer) UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer { + return s +} diff --git a/typedapi/types/unassignedinformation.go b/typedapi/types/unassignedinformation.go index f39fe8a280..da6fb6fd4c 100644 --- a/typedapi/types/unassignedinformation.go +++ b/typedapi/types/unassignedinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // UnassignedInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/cluster/allocation_explain/types.ts#L128-L136 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/cluster/allocation_explain/types.ts#L128-L136 type UnassignedInformation struct { AllocationStatus *string `json:"allocation_status,omitempty"` At DateTime `json:"at"` @@ -146,3 +146,5 @@ func NewUnassignedInformation() *UnassignedInformation { return r } + +// false diff --git a/typedapi/types/uniquetokenfilter.go b/typedapi/types/uniquetokenfilter.go index c7fd37465a..0cce483d98 100644 --- a/typedapi/types/uniquetokenfilter.go +++ b/typedapi/types/uniquetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UniqueTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L336-L339 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L336-L339 type UniqueTokenFilter struct { OnlyOnSamePosition *bool `json:"only_on_same_position,omitempty"` Type string `json:"type,omitempty"` @@ -102,3 +102,13 @@ func NewUniqueTokenFilter() *UniqueTokenFilter { return r } + +// true + +type UniqueTokenFilterVariant interface { + UniqueTokenFilterCaster() *UniqueTokenFilter +} + +func (s *UniqueTokenFilter) UniqueTokenFilterCaster() *UniqueTokenFilter { + return s +} diff --git a/typedapi/types/unmappedraretermsaggregate.go b/typedapi/types/unmappedraretermsaggregate.go index 3b9a465f99..e3879f1084 100644 --- a/typedapi/types/unmappedraretermsaggregate.go +++ b/typedapi/types/unmappedraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // UnmappedRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L493-L499 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L493-L499 type UnmappedRareTermsAggregate struct { Buckets BucketsVoid `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewUnmappedRareTermsAggregate() *UnmappedRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/unmappedsampleraggregate.go b/typedapi/types/unmappedsampleraggregate.go index 22161f43d7..237b60a643 100644 --- a/typedapi/types/unmappedsampleraggregate.go +++ b/typedapi/types/unmappedsampleraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // UnmappedSamplerAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L561-L562 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L561-L562 type UnmappedSamplerAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -631,8 +631,10 @@ func (s UnmappedSamplerAggregate) MarshalJSON() ([]byte, error) { // NewUnmappedSamplerAggregate returns a UnmappedSamplerAggregate. 
func NewUnmappedSamplerAggregate() *UnmappedSamplerAggregate { r := &UnmappedSamplerAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/unmappedsignificanttermsaggregate.go b/typedapi/types/unmappedsignificanttermsaggregate.go index 03c1c65184..a92e6cf9a2 100644 --- a/typedapi/types/unmappedsignificanttermsaggregate.go +++ b/typedapi/types/unmappedsignificanttermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UnmappedSignificantTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L690-L696 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L690-L696 type UnmappedSignificantTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsVoid `json:"buckets"` @@ -121,3 +121,5 @@ func NewUnmappedSignificantTermsAggregate() *UnmappedSignificantTermsAggregate { return r } + +// false diff --git a/typedapi/types/unmappedtermsaggregate.go b/typedapi/types/unmappedtermsaggregate.go index 4c4c77a8b4..76fc756776 100644 --- a/typedapi/types/unmappedtermsaggregate.go +++ b/typedapi/types/unmappedtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UnmappedTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L463-L469 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L463-L469 type UnmappedTermsAggregate struct { Buckets BucketsVoid `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewUnmappedTermsAggregate() *UnmappedTermsAggregate { return r } + +// false diff --git a/typedapi/types/unrateddocument.go b/typedapi/types/unrateddocument.go index f4e3e78edf..c93c4ef7f4 100644 --- a/typedapi/types/unrateddocument.go +++ b/typedapi/types/unrateddocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // UnratedDocument type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/rank_eval/types.ts#L150-L153 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/rank_eval/types.ts#L150-L153 type UnratedDocument struct { Id_ string `json:"_id"` Index_ string `json:"_index"` @@ -72,3 +72,5 @@ func NewUnratedDocument() *UnratedDocument { return r } + +// false diff --git a/typedapi/types/unsignedlongnumberproperty.go b/typedapi/types/unsignedlongnumberproperty.go index 83192146c5..a380a3bfa0 100644 --- a/typedapi/types/unsignedlongnumberproperty.go +++ b/typedapi/types/unsignedlongnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // UnsignedLongNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L177-L180 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L181-L184 type UnsignedLongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,12 +48,13 @@ type UnsignedLongNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *uint64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *uint64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -161,301 +163,313 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -542,301 +556,313 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -861,6 +887,11 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -909,6 +940,7 @@ func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Script: s.Script, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -922,10 +954,20 @@ func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) { // NewUnsignedLongNumberProperty returns a UnsignedLongNumberProperty. func NewUnsignedLongNumberProperty() *UnsignedLongNumberProperty { r := &UnsignedLongNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type UnsignedLongNumberPropertyVariant interface { + UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty +} + +func (s *UnsignedLongNumberProperty) UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty { + return s +} diff --git a/typedapi/types/untypeddecayfunction.go b/typedapi/types/untypeddecayfunction.go index 787169f95e..53bc9b3a7f 100644 --- a/typedapi/types/untypeddecayfunction.go +++ b/typedapi/types/untypeddecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,7 +29,7 @@ import ( // UntypedDecayFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/compound.ts#L204-L207 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/compound.ts#L204-L207 type UntypedDecayFunction struct { DecayFunctionBase map[string]DecayPlacement `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s UntypedDecayFunction) MarshalJSON() ([]byte, error) { // NewUntypedDecayFunction returns a UntypedDecayFunction. func NewUntypedDecayFunction() *UntypedDecayFunction { r := &UntypedDecayFunction{ - DecayFunctionBase: make(map[string]DecayPlacement, 0), + DecayFunctionBase: make(map[string]DecayPlacement), } return r } + +// true + +type UntypedDecayFunctionVariant interface { + UntypedDecayFunctionCaster() *UntypedDecayFunction +} + +func (s *UntypedDecayFunction) UntypedDecayFunctionCaster() *UntypedDecayFunction { + return s +} diff --git a/typedapi/types/untypeddistancefeaturequery.go b/typedapi/types/untypeddistancefeaturequery.go index 03289be3ba..5af97435e7 100644 --- a/typedapi/types/untypeddistancefeaturequery.go +++ b/typedapi/types/untypeddistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UntypedDistanceFeatureQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/specialized.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/specialized.ts#L61-L64 type UntypedDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -131,3 +131,13 @@ func NewUntypedDistanceFeatureQuery() *UntypedDistanceFeatureQuery { return r } + +// true + +type UntypedDistanceFeatureQueryVariant interface { + UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery +} + +func (s *UntypedDistanceFeatureQuery) UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/untypedrangequery.go b/typedapi/types/untypedrangequery.go index 6a9fa50287..71baccc469 100644 --- a/typedapi/types/untypedrangequery.go +++ b/typedapi/types/untypedrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // UntypedRangeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L150-L159 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L150-L159 type UntypedRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -160,3 +160,13 @@ func NewUntypedRangeQuery() *UntypedRangeQuery { return r } + +// true + +type UntypedRangeQueryVariant interface { + UntypedRangeQueryCaster() *UntypedRangeQuery +} + +func (s *UntypedRangeQuery) UntypedRangeQueryCaster() *UntypedRangeQuery { + return s +} diff --git a/typedapi/types/updateaction.go b/typedapi/types/updateaction.go index da26f61e61..6dc7a12b6f 100644 --- a/typedapi/types/updateaction.go +++ b/typedapi/types/updateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,26 +31,26 @@ import ( // UpdateAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L169-L205 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L182-L217 type UpdateAction struct { - // DetectNoop Set to false to disable setting 'result' in the response - // to 'noop' if no change to the document occurred. + // DetectNoop If true, the `result` in the response is set to 'noop' when no changes to the + // document occur. DetectNoop *bool `json:"detect_noop,omitempty"` // Doc A partial update to an existing document. Doc json.RawMessage `json:"doc,omitempty"` - // DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' + // DocAsUpsert Set to `true` to use the contents of `doc` as the value of `upsert`. DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` - // Script Script to execute to update the document. + // Script The script to run to update the document. 
Script *Script `json:"script,omitempty"` - // ScriptedUpsert Set to true to execute the script whether or not the document exists. + // ScriptedUpsert Set to `true` to run the script whether or not the document exists. ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` - // Source_ Set to false to disable source retrieval. You can also specify a - // comma-separated - // list of the fields you want to retrieve. + // Source_ If `false`, source retrieval is turned off. + // You can also specify a comma-separated list of the fields you want to + // retrieve. Source_ SourceConfig `json:"_source,omitempty"` - // Upsert If the document does not already exist, the contents of 'upsert' are inserted - // as a - // new document. If the document exists, the 'script' is executed. + // Upsert If the document does not already exist, the contents of `upsert` are inserted + // as a new document. + // If the document exists, the `script` is run. Upsert json.RawMessage `json:"upsert,omitempty"` } @@ -173,3 +173,13 @@ func NewUpdateAction() *UpdateAction { return r } + +// true + +type UpdateActionVariant interface { + UpdateActionCaster() *UpdateAction +} + +func (s *UpdateAction) UpdateActionCaster() *UpdateAction { + return s +} diff --git a/typedapi/types/updatebyqueryrethrottlenode.go b/typedapi/types/updatebyqueryrethrottlenode.go index c5f1451c17..fced4d22f4 100644 --- a/typedapi/types/updatebyqueryrethrottlenode.go +++ b/typedapi/types/updatebyqueryrethrottlenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // UpdateByQueryRethrottleNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 type UpdateByQueryRethrottleNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` @@ -107,9 +107,11 @@ func (s *UpdateByQueryRethrottleNode) UnmarshalJSON(data []byte) error { // NewUpdateByQueryRethrottleNode returns a UpdateByQueryRethrottleNode. func NewUpdateByQueryRethrottleNode() *UpdateByQueryRethrottleNode { r := &UpdateByQueryRethrottleNode{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]TaskInfo, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), } return r } + +// false diff --git a/typedapi/types/updateoperation.go b/typedapi/types/updateoperation.go index 0bf72f3b25..3acd69b78b 100644 --- a/typedapi/types/updateoperation.go +++ b/typedapi/types/updateoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,18 +33,20 @@ import ( // UpdateOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L136-L143 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_global/bulk/types.ts#L146-L156 type UpdateOperation struct { // Id_ The document ID. 
Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. - RequireAlias *bool `json:"require_alias,omitempty"` - RetryOnConflict *int `json:"retry_on_conflict,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // RequireAlias If `true`, the request's actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // RetryOnConflict The number of times an update should be retried in the case of a version + // conflict. + RetryOnConflict *int `json:"retry_on_conflict,omitempty"` + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -151,3 +153,13 @@ func NewUpdateOperation() *UpdateOperation { return r } + +// true + +type UpdateOperationVariant interface { + UpdateOperationCaster() *UpdateOperation +} + +func (s *UpdateOperation) UpdateOperationCaster() *UpdateOperation { + return s +} diff --git a/typedapi/types/uppercaseprocessor.go b/typedapi/types/uppercaseprocessor.go index 4bd12de1ab..b1917a7538 100644 --- a/typedapi/types/uppercaseprocessor.go +++ b/typedapi/types/uppercaseprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UppercaseProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1503-L1519 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1544-L1560 type UppercaseProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type UppercaseProcessor struct { // Field The field to make uppercase. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -88,16 +88,9 @@ func (s *UppercaseProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewUppercaseProcessor() *UppercaseProcessor { return r } + +// true + +type UppercaseProcessorVariant interface { + UppercaseProcessorCaster() *UppercaseProcessor +} + +func (s *UppercaseProcessor) UppercaseProcessorCaster() *UppercaseProcessor { + return s +} diff --git a/typedapi/types/uppercasetokenfilter.go b/typedapi/types/uppercasetokenfilter.go index 53b2734d9a..23787ab59a 100644 --- a/typedapi/types/uppercasetokenfilter.go +++ b/typedapi/types/uppercasetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // UppercaseTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L341-L343 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L341-L343 type UppercaseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewUppercaseTokenFilter() *UppercaseTokenFilter { return r } + +// true + +type UppercaseTokenFilterVariant interface { + UppercaseTokenFilterCaster() *UppercaseTokenFilter +} + +func (s *UppercaseTokenFilter) UppercaseTokenFilterCaster() *UppercaseTokenFilter { + return s +} diff --git a/typedapi/types/uripartsprocessor.go b/typedapi/types/uripartsprocessor.go index a2feb4f492..55d4610b09 100644 --- a/typedapi/types/uripartsprocessor.go +++ b/typedapi/types/uripartsprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UriPartsProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1539-L1565 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1580-L1606 type UriPartsProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type UriPartsProcessor struct { // Field Field containing the URI string. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -93,16 +93,9 @@ func (s *UriPartsProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -193,3 +186,13 @@ func NewUriPartsProcessor() *UriPartsProcessor { return r } + +// true + +type UriPartsProcessorVariant interface { + UriPartsProcessorCaster() *UriPartsProcessor +} + +func (s *UriPartsProcessor) UriPartsProcessorCaster() *UriPartsProcessor { + return s +} diff --git a/typedapi/types/urldecodeprocessor.go b/typedapi/types/urldecodeprocessor.go index 86c736b5bc..e06ae7272c 100644 --- a/typedapi/types/urldecodeprocessor.go +++ b/typedapi/types/urldecodeprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UrlDecodeProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L1521-L1537 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L1562-L1578 type UrlDecodeProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -39,7 +39,7 @@ type UrlDecodeProcessor struct { // Field The field to decode. Field string `json:"field"` // If Conditionally execute the processor. - If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. 
IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly @@ -88,16 +88,9 @@ func (s *UrlDecodeProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -160,3 +153,13 @@ func NewUrlDecodeProcessor() *UrlDecodeProcessor { return r } + +// true + +type UrlDecodeProcessorVariant interface { + UrlDecodeProcessorCaster() *UrlDecodeProcessor +} + +func (s *UrlDecodeProcessor) UrlDecodeProcessorCaster() *UrlDecodeProcessor { + return s +} diff --git a/typedapi/types/usagephase.go b/typedapi/types/usagephase.go new file mode 100644 index 0000000000..95f1df9271 --- /dev/null +++ b/typedapi/types/usagephase.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UsagePhase type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L154-L157 +type UsagePhase struct { + Actions []string `json:"actions"` + MinAge int64 `json:"min_age"` +} + +func (s *UsagePhase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + } + } + return nil +} + +// NewUsagePhase returns a UsagePhase. +func NewUsagePhase() *UsagePhase { + r := &UsagePhase{} + + return r +} + +// false diff --git a/typedapi/types/usagephases.go b/typedapi/types/usagephases.go new file mode 100644 index 0000000000..cd80a03881 --- /dev/null +++ b/typedapi/types/usagephases.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +// UsagePhases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L159-L165 +type UsagePhases struct { + Cold *UsagePhase `json:"cold,omitempty"` + Delete *UsagePhase `json:"delete,omitempty"` + Frozen *UsagePhase `json:"frozen,omitempty"` + Hot *UsagePhase `json:"hot,omitempty"` + Warm *UsagePhase `json:"warm,omitempty"` +} + +// NewUsagePhases returns a UsagePhases. +func NewUsagePhases() *UsagePhases { + r := &UsagePhases{} + + return r +} + +// false diff --git a/typedapi/types/usagestatsindex.go b/typedapi/types/usagestatsindex.go index 997d5fe775..b1405b4e95 100644 --- a/typedapi/types/usagestatsindex.go +++ b/typedapi/types/usagestatsindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // UsageStatsIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43 type UsageStatsIndex struct { Shards []UsageStatsShards `json:"shards"` } @@ -33,3 +33,5 @@ func NewUsageStatsIndex() *UsageStatsIndex { return r } + +// false diff --git a/typedapi/types/usagestatsshards.go b/typedapi/types/usagestatsshards.go index 6bf9a0affb..843900afbe 100644 --- a/typedapi/types/usagestatsshards.go +++ b/typedapi/types/usagestatsshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UsageStatsShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50 type UsageStatsShards struct { Routing ShardRouting `json:"routing"` Stats IndicesShardsStats `json:"stats"` @@ -92,3 +92,5 @@ func NewUsageStatsShards() *UsageStatsShards { return r } + +// false diff --git a/typedapi/types/user.go b/typedapi/types/user.go index bf65f55f72..3b89068285 100644 --- a/typedapi/types/user.go +++ b/typedapi/types/user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // User type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/User.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/User.ts#L23-L31 type User struct { Email *string `json:"email,omitempty"` Enabled bool `json:"enabled"` @@ -119,3 +119,5 @@ func NewUser() *User { return r } + +// false diff --git a/typedapi/types/useragentprocessor.go b/typedapi/types/useragentprocessor.go index 84d70b50d8..6b685b874e 100644 --- a/typedapi/types/useragentprocessor.go +++ b/typedapi/types/useragentprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // UserAgentProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ingest/_types/Processors.ts#L474-L505 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Processors.ts#L515-L546 type UserAgentProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -43,7 +43,7 @@ type UserAgentProcessor struct { // Field The field containing the user agent string. Field string `json:"field"` // If Conditionally execute the processor. 
- If *string `json:"if,omitempty"` + If *Script `json:"if,omitempty"` // IgnoreFailure Ignore failures for the processor. IgnoreFailure *bool `json:"ignore_failure,omitempty"` // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without @@ -113,16 +113,9 @@ func (s *UserAgentProcessor) UnmarshalJSON(data []byte) error { } case "if": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.If); err != nil { return fmt.Errorf("%s | %w", "If", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.If = &o case "ignore_failure": var tmp any @@ -202,3 +195,13 @@ func NewUserAgentProcessor() *UserAgentProcessor { return r } + +// true + +type UserAgentProcessorVariant interface { + UserAgentProcessorCaster() *UserAgentProcessor +} + +func (s *UserAgentProcessor) UserAgentProcessorCaster() *UserAgentProcessor { + return s +} diff --git a/typedapi/types/userindicesprivileges.go b/typedapi/types/userindicesprivileges.go index 770a1e21a1..a7fcb65865 100644 --- a/typedapi/types/userindicesprivileges.go +++ b/typedapi/types/userindicesprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // UserIndicesPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/Privileges.ts#L255-L277 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/Privileges.ts#L292-L314 type UserIndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. Implicitly, restricted indices have limited @@ -91,8 +91,19 @@ func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { } case "names": - if err := dec.Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } } case "privileges": @@ -119,7 +130,7 @@ func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", 
"terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -159,3 +170,5 @@ func NewUserIndicesPrivileges() *UserIndicesPrivileges { return r } + +// false diff --git a/typedapi/types/userprofile.go b/typedapi/types/userprofile.go index a0dd27d5ca..3f20baeb20 100644 --- a/typedapi/types/userprofile.go +++ b/typedapi/types/userprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UserProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/UserProfile.ts#L41-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/UserProfile.ts#L41-L47 type UserProfile struct { Data map[string]json.RawMessage `json:"data"` Enabled *bool `json:"enabled,omitempty"` @@ -103,9 +103,11 @@ func (s *UserProfile) UnmarshalJSON(data []byte) error { // NewUserProfile returns a UserProfile. func NewUserProfile() *UserProfile { r := &UserProfile{ - Data: make(map[string]json.RawMessage, 0), - Labels: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/userprofilehitmetadata.go b/typedapi/types/userprofilehitmetadata.go index ce510a6af8..8fec6c6066 100644 --- a/typedapi/types/userprofilehitmetadata.go +++ b/typedapi/types/userprofilehitmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UserProfileHitMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/UserProfile.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/UserProfile.ts#L27-L30 type UserProfileHitMetadata struct { PrimaryTerm_ int64 `json:"_primary_term"` SeqNo_ int64 `json:"_seq_no"` @@ -83,3 +83,5 @@ func NewUserProfileHitMetadata() *UserProfileHitMetadata { return r } + +// false diff --git a/typedapi/types/userprofileuser.go b/typedapi/types/userprofileuser.go index 0d1be8141d..d69fb31ee0 100644 --- a/typedapi/types/userprofileuser.go +++ b/typedapi/types/userprofileuser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UserProfileUser type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/UserProfile.ts#L32-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/UserProfile.ts#L32-L39 type UserProfileUser struct { Email *string `json:"email,omitempty"` FullName *string `json:"full_name,omitempty"` @@ -104,3 +104,5 @@ func NewUserProfileUser() *UserProfileUser { return r } + +// false diff --git a/typedapi/types/userprofilewithmetadata.go b/typedapi/types/userprofilewithmetadata.go index 80288ffc8b..027687344a 100644 --- a/typedapi/types/userprofilewithmetadata.go +++ b/typedapi/types/userprofilewithmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UserProfileWithMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/_types/UserProfile.ts#L49-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/_types/UserProfile.ts#L49-L52 type UserProfileWithMetadata struct { Data map[string]json.RawMessage `json:"data"` Doc_ UserProfileHitMetadata `json:"_doc"` @@ -125,9 +125,11 @@ func (s *UserProfileWithMetadata) UnmarshalJSON(data []byte) error { // NewUserProfileWithMetadata returns a UserProfileWithMetadata. func NewUserProfileWithMetadata() *UserProfileWithMetadata { r := &UserProfileWithMetadata{ - Data: make(map[string]json.RawMessage, 0), - Labels: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/userquerycontainer.go b/typedapi/types/userquerycontainer.go index f2d8ca9047..ba837d8596 100644 --- a/typedapi/types/userquerycontainer.go +++ b/typedapi/types/userquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,9 @@ import ( // UserQueryContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/query_user/types.ts#L37-L101 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/query_user/types.ts#L37-L101 type UserQueryContainer struct { + AdditionalUserQueryContainerProperty map[string]json.RawMessage `json:"-"` // Bool matches users matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns users that contain an indexed value for a field. @@ -159,20 +160,73 @@ func (s *UserQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalUserQueryContainerProperty == nil { + s.AdditionalUserQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalUserQueryContainerProperty", err) + } + s.AdditionalUserQueryContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s UserQueryContainer) MarshalJSON() ([]byte, error) { + type opt UserQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalUserQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalUserQueryContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewUserQueryContainer returns a UserQueryContainer. 
func NewUserQueryContainer() *UserQueryContainer { r := &UserQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalUserQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type UserQueryContainerVariant interface { + UserQueryContainerCaster() *UserQueryContainer +} + +func (s *UserQueryContainer) UserQueryContainerCaster() *UserQueryContainer { + return s +} diff --git a/typedapi/types/userrealm.go b/typedapi/types/userrealm.go index 5e155af95b..12079a7dd7 100644 --- a/typedapi/types/userrealm.go +++ b/typedapi/types/userrealm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // UserRealm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/security/get_token/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/security/get_token/types.ts#L50-L53 type UserRealm struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewUserRealm() *UserRealm { return r } + +// false diff --git a/typedapi/types/validation.go b/typedapi/types/validation.go index 487f431ec9..4b8c1b524c 100644 --- a/typedapi/types/validation.go +++ b/typedapi/types/validation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -28,5 +28,9 @@ package types // IncludedInValidation // RegexValidation // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/connector/_types/Connector.ts#L50-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/connector/_types/Connector.ts#L50-L56 type Validation any + +type ValidationVariant interface { + ValidationCaster() *Validation +} diff --git a/typedapi/types/validationloss.go b/typedapi/types/validationloss.go index 10ad4dd806..f20a668945 100644 --- a/typedapi/types/validationloss.go +++ b/typedapi/types/validationloss.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ValidationLoss type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/DataframeAnalytics.ts#L569-L574 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/DataframeAnalytics.ts#L571-L576 type ValidationLoss struct { // FoldValues Validation loss values for every added decision tree during the forest // growing procedure. @@ -83,3 +83,5 @@ func NewValidationLoss() *ValidationLoss { return r } + +// false diff --git a/typedapi/types/valuecountaggregate.go b/typedapi/types/valuecountaggregate.go index 305c4fd60e..531f4b0e8c 100644 --- a/typedapi/types/valuecountaggregate.go +++ b/typedapi/types/valuecountaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ValueCountAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L231-L236 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L231-L236 type ValueCountAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. 
A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewValueCountAggregate() *ValueCountAggregate { return r } + +// false diff --git a/typedapi/types/valuecountaggregation.go b/typedapi/types/valuecountaggregation.go index 6fe51b6157..d9c741e9d7 100644 --- a/typedapi/types/valuecountaggregation.go +++ b/typedapi/types/valuecountaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ValueCountAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L434-L434 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L434-L434 type ValueCountAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewValueCountAggregation() *ValueCountAggregation { return r } + +// true + +type ValueCountAggregationVariant interface { + ValueCountAggregationCaster() *ValueCountAggregation +} + +func (s *ValueCountAggregation) ValueCountAggregationCaster() *ValueCountAggregation { + return s +} diff --git a/typedapi/types/variablewidthhistogramaggregate.go b/typedapi/types/variablewidthhistogramaggregate.go index 3608f4f0fc..e77e701a78 100644 --- a/typedapi/types/variablewidthhistogramaggregate.go +++ b/typedapi/types/variablewidthhistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // VariableWidthHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L402-L404 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L402-L404 type VariableWidthHistogramAggregate struct { Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewVariableWidthHistogramAggregate() *VariableWidthHistogramAggregate { return r } + +// false diff --git a/typedapi/types/variablewidthhistogramaggregation.go b/typedapi/types/variablewidthhistogramaggregation.go index e8ed2ee9d4..ebfcb20bc4 100644 --- a/typedapi/types/variablewidthhistogramaggregation.go +++ b/typedapi/types/variablewidthhistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // VariableWidthHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/bucket.ts#L1091-L1115 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/bucket.ts#L1091-L1115 type VariableWidthHistogramAggregation struct { // Buckets The target number of buckets. 
Buckets *int `json:"buckets,omitempty"` @@ -132,3 +132,13 @@ func NewVariableWidthHistogramAggregation() *VariableWidthHistogramAggregation { return r } + +// true + +type VariableWidthHistogramAggregationVariant interface { + VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation +} + +func (s *VariableWidthHistogramAggregation) VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation { + return s +} diff --git a/typedapi/types/variablewidthhistogrambucket.go b/typedapi/types/variablewidthhistogrambucket.go index 136ba8876c..1947dbba0f 100644 --- a/typedapi/types/variablewidthhistogrambucket.go +++ b/typedapi/types/variablewidthhistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,7 +32,7 @@ import ( // VariableWidthHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L406-L413 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L406-L413 type VariableWidthHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -715,8 +715,10 @@ func (s VariableWidthHistogramBucket) MarshalJSON() ([]byte, error) { // NewVariableWidthHistogramBucket returns a VariableWidthHistogramBucket. 
func NewVariableWidthHistogramBucket() *VariableWidthHistogramBucket { r := &VariableWidthHistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/vector.go b/typedapi/types/vector.go index 53f031cb05..1e4e98a025 100644 --- a/typedapi/types/vector.go +++ b/typedapi/types/vector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Vector type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L452-L456 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L460-L464 type Vector struct { Available bool `json:"available"` DenseVectorDimsAvgCount int `json:"dense_vector_dims_avg_count"` @@ -142,3 +142,5 @@ func NewVector() *Vector { return r } + +// false diff --git a/typedapi/types/verifyindex.go b/typedapi/types/verifyindex.go index 91dea4038d..623dbc691e 100644 --- a/typedapi/types/verifyindex.go +++ b/typedapi/types/verifyindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // VerifyIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/indices/recovery/types.ts#L111-L116 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/indices/recovery/types.ts#L111-L116 type VerifyIndex struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -84,3 +84,5 @@ func NewVerifyIndex() *VerifyIndex { return r } + +// false diff --git a/typedapi/types/versionproperty.go b/typedapi/types/versionproperty.go index c4638fc4c2..b9c51fbfb4 100644 --- a/typedapi/types/versionproperty.go +++ b/typedapi/types/versionproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // VersionProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L289-L291 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L305-L307 type VersionProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,10 +42,11 @@ type VersionProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *VersionProperty) UnmarshalJSON(data []byte) error { @@ -116,301 +118,313 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := 
NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -459,301 +473,313 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := 
NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -773,6 +799,11 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -787,15 +818,16 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { func (s VersionProperty) MarshalJSON() ([]byte, error) { type innerVersionProperty VersionProperty tmp := innerVersionProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "version" @@ -806,10 +838,20 @@ func (s VersionProperty) MarshalJSON() ([]byte, error) { // NewVersionProperty returns a VersionProperty. 
func NewVersionProperty() *VersionProperty { r := &VersionProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type VersionPropertyVariant interface { + VersionPropertyCaster() *VersionProperty +} + +func (s *VersionProperty) VersionPropertyCaster() *VersionProperty { + return s +} diff --git a/typedapi/types/vertex.go b/typedapi/types/vertex.go index ff4a3765a1..92c8609a02 100644 --- a/typedapi/types/vertex.go +++ b/typedapi/types/vertex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Vertex type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/Vertex.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/Vertex.ts#L23-L28 type Vertex struct { Depth int64 `json:"depth"` Field string `json:"field"` @@ -113,3 +113,5 @@ func NewVertex() *Vertex { return r } + +// false diff --git a/typedapi/types/vertexdefinition.go b/typedapi/types/vertexdefinition.go index 266c0c8371..bc5922e7f3 100644 --- a/typedapi/types/vertexdefinition.go +++ b/typedapi/types/vertexdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // VertexDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/Vertex.ts#L30-L59 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/Vertex.ts#L30-L59 type VertexDefinition struct { // Exclude Prevents the specified terms from being included in the results. Exclude []string `json:"exclude,omitempty"` @@ -138,3 +138,13 @@ func NewVertexDefinition() *VertexDefinition { return r } + +// true + +type VertexDefinitionVariant interface { + VertexDefinitionCaster() *VertexDefinition +} + +func (s *VertexDefinition) VertexDefinitionCaster() *VertexDefinition { + return s +} diff --git a/typedapi/types/vertexinclude.go b/typedapi/types/vertexinclude.go index 1990546d31..f6862a7eb4 100644 --- a/typedapi/types/vertexinclude.go +++ b/typedapi/types/vertexinclude.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,14 +31,26 @@ import ( // VertexInclude type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/graph/_types/Vertex.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/graph/_types/Vertex.ts#L61-L65 type VertexInclude struct { - Boost Float64 `json:"boost"` - Term string `json:"term"` + Boost *Float64 `json:"boost,omitempty"` + Term string `json:"term"` } func (s *VertexInclude) UnmarshalJSON(data []byte) error { + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Term) + if err != nil { + return err + } + return nil + } + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,10 +74,10 @@ func (s *VertexInclude) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Boost", err) } f := Float64(value) - s.Boost = f + s.Boost = &f case float64: f := Float64(v) - s.Boost = f + s.Boost = &f } case "term": @@ -91,3 +103,13 @@ func NewVertexInclude() *VertexInclude { return r } + +// true + +type VertexIncludeVariant interface { + VertexIncludeCaster() *VertexInclude +} + +func (s *VertexInclude) VertexIncludeCaster() *VertexInclude { + return s +} diff --git a/typedapi/types/vocabulary.go b/typedapi/types/vocabulary.go index 5216bb849c..340c36b9c4 100644 --- a/typedapi/types/vocabulary.go +++ b/typedapi/types/vocabulary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Vocabulary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L233-L235 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L217-L219 type Vocabulary struct { Index string `json:"index"` } @@ -66,3 +66,13 @@ func NewVocabulary() *Vocabulary { return r } + +// true + +type VocabularyVariant interface { + VocabularyCaster() *Vocabulary +} + +func (s *Vocabulary) VocabularyCaster() *Vocabulary { + return s +} diff --git a/typedapi/types/void.go b/typedapi/types/void.go new file mode 100755 index 0000000000..8fcf1dd08f --- /dev/null +++ b/typedapi/types/void.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +type Void any diff --git a/typedapi/types/waitforactiveshards.go b/typedapi/types/waitforactiveshards.go index 92b8972143..8dfc751de0 100644 --- a/typedapi/types/waitforactiveshards.go +++ b/typedapi/types/waitforactiveshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -25,5 +25,5 @@ package types // int // waitforactiveshardoptions.WaitForActiveShardOptions // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/common.ts#L143-L144 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/common.ts#L139-L140 type WaitForActiveShards any diff --git a/typedapi/types/waitforsnapshotaction.go b/typedapi/types/waitforsnapshotaction.go index de9e006f68..264cdd2dfd 100644 --- a/typedapi/types/waitforsnapshotaction.go +++ b/typedapi/types/waitforsnapshotaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WaitForSnapshotAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ilm/_types/Phase.ts#L148-L150 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ilm/_types/Phase.ts#L145-L147 type WaitForSnapshotAction struct { Policy string `json:"policy"` } @@ -74,3 +74,13 @@ func NewWaitForSnapshotAction() *WaitForSnapshotAction { return r } + +// true + +type WaitForSnapshotActionVariant interface { + WaitForSnapshotActionCaster() *WaitForSnapshotAction +} + +func (s *WaitForSnapshotAction) WaitForSnapshotActionCaster() *WaitForSnapshotAction { + return s +} diff --git a/typedapi/types/warmerstats.go b/typedapi/types/warmerstats.go index 785401d33e..6c542a0620 100644 --- a/typedapi/types/warmerstats.go +++ b/typedapi/types/warmerstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WarmerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Stats.ts#L407-L412 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Stats.ts#L410-L415 type WarmerStats struct { Current int64 `json:"current"` Total int64 `json:"total"` @@ -105,3 +105,5 @@ func NewWarmerStats() *WarmerStats { return r } + +// false diff --git a/typedapi/types/watch.go b/typedapi/types/watch.go index 4593c57ddd..d567c37753 100644 --- a/typedapi/types/watch.go +++ b/typedapi/types/watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // Watch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Watch.ts#L37-L47 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Watch.ts#L37-L47 type Watch struct { Actions map[string]WatcherAction `json:"actions"` Condition WatcherCondition `json:"condition"` @@ -114,8 +114,18 @@ func (s *Watch) UnmarshalJSON(data []byte) error { // NewWatch returns a Watch. func NewWatch() *Watch { r := &Watch{ - Actions: make(map[string]WatcherAction, 0), + Actions: make(map[string]WatcherAction), } return r } + +// true + +type WatchVariant interface { + WatchCaster() *Watch +} + +func (s *Watch) WatchCaster() *Watch { + return s +} diff --git a/typedapi/types/watcher.go b/typedapi/types/watcher.go index 7040c80319..bec5e9f24c 100644 --- a/typedapi/types/watcher.go +++ b/typedapi/types/watcher.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Watcher type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L458-L462 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L466-L470 type Watcher struct { Available bool `json:"available"` Count Counter `json:"count"` @@ -109,3 +109,5 @@ func NewWatcher() *Watcher { return r } + +// false diff --git a/typedapi/types/watcheraction.go b/typedapi/types/watcheraction.go index f8426c631d..2b933861c1 100644 --- a/typedapi/types/watcheraction.go +++ b/typedapi/types/watcheraction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // WatcherAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L35-L54 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L35-L54 type WatcherAction struct { ActionType *actiontype.ActionType `json:"action_type,omitempty"` Condition *WatcherCondition `json:"condition,omitempty"` @@ -165,3 +165,13 @@ func NewWatcherAction() *WatcherAction { return r } + +// true + +type WatcherActionVariant interface { + WatcherActionCaster() *WatcherAction +} + +func (s *WatcherAction) WatcherActionCaster() *WatcherAction { + return s +} diff --git a/typedapi/types/watcheractions.go b/typedapi/types/watcheractions.go index e0aa02faad..9da6e12b22 100644 --- a/typedapi/types/watcheractions.go +++ b/typedapi/types/watcheractions.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // WatcherActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L394-L396 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L402-L404 type WatcherActions struct { Actions map[string]WatcherActionTotals `json:"actions"` } @@ -30,8 +30,10 @@ type WatcherActions struct { // NewWatcherActions returns a WatcherActions. func NewWatcherActions() *WatcherActions { r := &WatcherActions{ - Actions: make(map[string]WatcherActionTotals, 0), + Actions: make(map[string]WatcherActionTotals), } return r } + +// false diff --git a/typedapi/types/watcheractiontotals.go b/typedapi/types/watcheractiontotals.go index ecfe35b029..27fda0c794 100644 --- a/typedapi/types/watcheractiontotals.go +++ b/typedapi/types/watcheractiontotals.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // WatcherActionTotals type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L410-L413 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L418-L421 type WatcherActionTotals struct { Total Duration `json:"total"` TotalTimeInMs int64 `json:"total_time_in_ms"` @@ -72,3 +72,5 @@ func NewWatcherActionTotals() *WatcherActionTotals { return r } + +// false diff --git a/typedapi/types/watchercondition.go b/typedapi/types/watchercondition.go index 42a0334392..196fba4e95 100644 --- a/typedapi/types/watchercondition.go +++ b/typedapi/types/watchercondition.go @@ -16,31 +16,75 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types import ( + "encoding/json" + "fmt" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop" ) // WatcherCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Conditions.ts#L50-L62 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Conditions.ts#L50-L62 type WatcherCondition struct { - Always *AlwaysCondition `json:"always,omitempty"` - ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"` - Compare map[string]map[conditionop.ConditionOp]FieldValue `json:"compare,omitempty"` - Never *NeverCondition `json:"never,omitempty"` - Script *ScriptCondition `json:"script,omitempty"` + AdditionalWatcherConditionProperty map[string]json.RawMessage `json:"-"` + Always *AlwaysCondition `json:"always,omitempty"` + ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"` + Compare map[string]map[conditionop.ConditionOp]FieldValue `json:"compare,omitempty"` + Never *NeverCondition `json:"never,omitempty"` + Script *ScriptCondition `json:"script,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s WatcherCondition) MarshalJSON() ([]byte, error) { + type opt WatcherCondition + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalWatcherConditionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalWatcherConditionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewWatcherCondition returns a WatcherCondition. 
func NewWatcherCondition() *WatcherCondition { r := &WatcherCondition{ - ArrayCompare: make(map[string]ArrayCompareCondition, 0), - Compare: make(map[string]map[conditionop.ConditionOp]FieldValue, 0), + AdditionalWatcherConditionProperty: make(map[string]json.RawMessage), + ArrayCompare: make(map[string]ArrayCompareCondition), + Compare: make(map[string]map[conditionop.ConditionOp]FieldValue), } return r } + +// true + +type WatcherConditionVariant interface { + WatcherConditionCaster() *WatcherCondition +} + +func (s *WatcherCondition) WatcherConditionCaster() *WatcherCondition { + return s +} diff --git a/typedapi/types/watcherinput.go b/typedapi/types/watcherinput.go index b799bde1d8..f8dd893a7c 100644 --- a/typedapi/types/watcherinput.go +++ b/typedapi/types/watcherinput.go @@ -16,29 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types import ( "encoding/json" + "fmt" ) // WatcherInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Input.ts#L90-L98 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Input.ts#L90-L98 type WatcherInput struct { - Chain *ChainInput `json:"chain,omitempty"` - Http *HttpInput `json:"http,omitempty"` - Search *SearchInput `json:"search,omitempty"` - Simple map[string]json.RawMessage `json:"simple,omitempty"` + AdditionalWatcherInputProperty map[string]json.RawMessage `json:"-"` + Chain *ChainInput `json:"chain,omitempty"` + Http *HttpInput `json:"http,omitempty"` + Search *SearchInput `json:"search,omitempty"` + Simple map[string]json.RawMessage `json:"simple,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s WatcherInput) MarshalJSON() ([]byte, error) { + type opt WatcherInput + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalWatcherInputProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalWatcherInputProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewWatcherInput returns a WatcherInput. 
func NewWatcherInput() *WatcherInput { r := &WatcherInput{ - Simple: make(map[string]json.RawMessage, 0), + AdditionalWatcherInputProperty: make(map[string]json.RawMessage), + Simple: make(map[string]json.RawMessage), } return r } + +// true + +type WatcherInputVariant interface { + WatcherInputCaster() *WatcherInput +} + +func (s *WatcherInput) WatcherInputCaster() *WatcherInput { + return s +} diff --git a/typedapi/types/watchernodestats.go b/typedapi/types/watchernodestats.go index 298bb7e5d2..2540ff0861 100644 --- a/typedapi/types/watchernodestats.go +++ b/typedapi/types/watchernodestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,14 +33,35 @@ import ( // WatcherNodeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/types.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/types.ts#L33-L61 type WatcherNodeStats struct { - CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"` - ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"` - NodeId string `json:"node_id"` - QueuedWatches []WatchRecordQueuedStats `json:"queued_watches,omitempty"` - WatchCount int64 `json:"watch_count"` - WatcherState watcherstate.WatcherState `json:"watcher_state"` + // CurrentWatches The current executing watches metric gives insight into the watches that are + // currently being executed by Watcher. + // Additional information is shared per watch that is currently executing. 
+ // This information includes the `watch_id`, the time its execution started and + // its current execution phase. + // To include this metric, the `metric` option should be set to + // `current_watches` or `_all`. + // In addition you can also specify the `emit_stacktraces=true` parameter, which + // adds stack traces for each watch that is being run. + // These stack traces can give you more insight into an execution of a watch. + CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"` + ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"` + NodeId string `json:"node_id"` + // QueuedWatches Watcher moderates the execution of watches such that their execution won't + // put too much pressure on the node and its resources. + // If too many watches trigger concurrently and there isn't enough capacity to + // run them all, some of the watches are queued, waiting for the current running + // watches to finish.s + // The queued watches metric gives insight on these queued watches. + // + // To include this metric, the `metric` option should include `queued_watches` + // or `_all`. + QueuedWatches []WatchRecordQueuedStats `json:"queued_watches,omitempty"` + // WatchCount The number of watches currently registered. + WatchCount int64 `json:"watch_count"` + // WatcherState The current state of Watcher. + WatcherState watcherstate.WatcherState `json:"watcher_state"` } func (s *WatcherNodeStats) UnmarshalJSON(data []byte) error { @@ -109,3 +130,5 @@ func NewWatcherNodeStats() *WatcherNodeStats { return r } + +// false diff --git a/typedapi/types/watcherstatusactions.go b/typedapi/types/watcherstatusactions.go index ba97d016e2..d2a3116e83 100644 --- a/typedapi/types/watcherstatusactions.go +++ b/typedapi/types/watcherstatusactions.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // WatcherStatusActions type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Action.ts#L56-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Action.ts#L56-L56 type WatcherStatusActions map[string]ActionStatus + +type WatcherStatusActionsVariant interface { + WatcherStatusActionsCaster() *WatcherStatusActions +} diff --git a/typedapi/types/watcherwatch.go b/typedapi/types/watcherwatch.go index 36966dad68..8eeb707c60 100644 --- a/typedapi/types/watcherwatch.go +++ b/typedapi/types/watcherwatch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // WatcherWatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L398-L403 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L406-L411 type WatcherWatch struct { Action map[string]Counter `json:"action,omitempty"` Condition map[string]Counter `json:"condition,omitempty"` @@ -33,10 +33,12 @@ type WatcherWatch struct { // NewWatcherWatch returns a WatcherWatch. 
func NewWatcherWatch() *WatcherWatch { r := &WatcherWatch{ - Action: make(map[string]Counter, 0), - Condition: make(map[string]Counter, 0), - Input: make(map[string]Counter, 0), + Action: make(map[string]Counter), + Condition: make(map[string]Counter), + Input: make(map[string]Counter), } return r } + +// false diff --git a/typedapi/types/watcherwatchtrigger.go b/typedapi/types/watcherwatchtrigger.go index 43e1846884..71f4cce5f9 100644 --- a/typedapi/types/watcherwatchtrigger.go +++ b/typedapi/types/watcherwatchtrigger.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // WatcherWatchTrigger type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L405-L408 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L413-L416 type WatcherWatchTrigger struct { All_ Counter `json:"_all"` Schedule *WatcherWatchTriggerSchedule `json:"schedule,omitempty"` @@ -34,3 +34,5 @@ func NewWatcherWatchTrigger() *WatcherWatchTrigger { return r } + +// false diff --git a/typedapi/types/watcherwatchtriggerschedule.go b/typedapi/types/watcherwatchtriggerschedule.go index f06da3f7f9..da79780428 100644 --- a/typedapi/types/watcherwatchtriggerschedule.go +++ b/typedapi/types/watcherwatchtriggerschedule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WatcherWatchTriggerSchedule type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L464-L467 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L472-L475 type WatcherWatchTriggerSchedule struct { Active int64 `json:"active"` All_ Counter `json:"_all"` @@ -105,3 +105,5 @@ func NewWatcherWatchTriggerSchedule() *WatcherWatchTriggerSchedule { return r } + +// false diff --git a/typedapi/types/watchrecord.go b/typedapi/types/watchrecord.go index 24097d6bd6..7b858e0cec 100644 --- a/typedapi/types/watchrecord.go +++ b/typedapi/types/watchrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // WatchRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/execute_watch/types.ts#L27-L39 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/execute_watch/types.ts#L27-L39 type WatchRecord struct { Condition WatcherCondition `json:"condition"` Input WatcherInput `json:"input"` @@ -136,3 +136,5 @@ func NewWatchRecord() *WatchRecord { return r } + +// false diff --git a/typedapi/types/watchrecordqueuedstats.go b/typedapi/types/watchrecordqueuedstats.go index aa6c6a75d9..2f73ab875a 100644 --- a/typedapi/types/watchrecordqueuedstats.go +++ b/typedapi/types/watchrecordqueuedstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,8 +30,10 @@ import ( // WatchRecordQueuedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/types.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/types.ts#L71-L77 type WatchRecordQueuedStats struct { + // ExecutionTime The time the watch was run. + // This is just before the input is being run. ExecutionTime DateTime `json:"execution_time"` } @@ -66,3 +68,5 @@ func NewWatchRecordQueuedStats() *WatchRecordQueuedStats { return r } + +// false diff --git a/typedapi/types/watchrecordstats.go b/typedapi/types/watchrecordstats.go index 5b3ebdbc0e..649e47c95b 100644 --- a/typedapi/types/watchrecordstats.go +++ b/typedapi/types/watchrecordstats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -32,14 +32,19 @@ import ( // WatchRecordStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/types.ts#L54-L60 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/types.ts#L79-L94 type WatchRecordStats struct { - ExecutedActions []string `json:"executed_actions,omitempty"` - ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` - ExecutionTime DateTime `json:"execution_time"` - TriggeredTime DateTime `json:"triggered_time"` - WatchId string `json:"watch_id"` - WatchRecordId string `json:"watch_record_id"` + ExecutedActions []string `json:"executed_actions,omitempty"` + // ExecutionPhase The current watch execution phase. + ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` + // ExecutionTime The time the watch was run. + // This is just before the input is being run. + ExecutionTime DateTime `json:"execution_time"` + // TriggeredTime The time the watch was triggered by the trigger engine. + TriggeredTime DateTime `json:"triggered_time"` + WatchId string `json:"watch_id"` + // WatchRecordId The watch record identifier. + WatchRecordId string `json:"watch_record_id"` } func (s *WatchRecordStats) UnmarshalJSON(data []byte) error { @@ -98,3 +103,5 @@ func NewWatchRecordStats() *WatchRecordStats { return r } + +// false diff --git a/typedapi/types/watchstatus.go b/typedapi/types/watchstatus.go index d85042a8d8..f9ef8bc3e4 100644 --- a/typedapi/types/watchstatus.go +++ b/typedapi/types/watchstatus.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WatchStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Watch.ts#L49-L56 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Watch.ts#L49-L56 type WatchStatus struct { Actions WatcherStatusActions `json:"actions"` ExecutionState *string `json:"execution_state,omitempty"` @@ -104,3 +104,13 @@ func NewWatchStatus() *WatchStatus { return r } + +// true + +type WatchStatusVariant interface { + WatchStatusCaster() *WatchStatus +} + +func (s *WatchStatus) WatchStatusCaster() *WatchStatus { + return s +} diff --git a/typedapi/types/watsonxservicesettings.go b/typedapi/types/watsonxservicesettings.go new file mode 100644 index 0000000000..eddb9c8502 --- /dev/null +++ b/typedapi/types/watsonxservicesettings.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WatsonxServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/inference/put_watsonx/PutWatsonxRequest.ts#L80-L117 +type WatsonxServiceSettings struct { + // ApiKey A valid API key of your Watsonx account. + // You can find your Watsonx API keys or you can create a new one on the API + // keys page. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // ApiVersion A version parameter that takes a version date in the format of `YYYY-MM-DD`. + // For the active version data parameters, refer to the Wastonx documentation. + ApiVersion string `json:"api_version"` + // ModelId The name of the model to use for the inference task. + // Refer to the IBM Embedding Models section in the Watsonx documentation for + // the list of available text embedding models. + ModelId string `json:"model_id"` + // ProjectId The identifier of the IBM Cloud project to use for the inference task. + ProjectId string `json:"project_id"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Watsonx. + // By default, the `watsonxai` service sets the number of requests allowed per + // minute to 120. 
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL of the inference endpoint that you created on Watsonx. + Url string `json:"url"` +} + +func (s *WatsonxServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "api_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiVersion = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "project_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProjectId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProjectId = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewWatsonxServiceSettings returns a WatsonxServiceSettings. 
+func NewWatsonxServiceSettings() *WatsonxServiceSettings { + r := &WatsonxServiceSettings{} + + return r +} + +// true + +type WatsonxServiceSettingsVariant interface { + WatsonxServiceSettingsCaster() *WatsonxServiceSettings +} + +func (s *WatsonxServiceSettings) WatsonxServiceSettingsCaster() *WatsonxServiceSettings { + return s +} diff --git a/typedapi/types/aggregation.go b/typedapi/types/web.go similarity index 68% rename from typedapi/types/aggregation.go rename to typedapi/types/web.go index 49c72e45ba..ae546b90c1 100644 --- a/typedapi/types/aggregation.go +++ b/typedapi/types/web.go @@ -16,19 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types -// Aggregation type. +// Web type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregation.ts#L20-L20 -type Aggregation struct { +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ingest/_types/Database.ts#L61-L61 +type Web struct { } -// NewAggregation returns a Aggregation. -func NewAggregation() *Aggregation { - r := &Aggregation{} +// NewWeb returns a Web. +func NewWeb() *Web { + r := &Web{} return r } + +// false diff --git a/typedapi/types/webhookaction.go b/typedapi/types/webhookaction.go index 04e20b8ef3..90b50b8638 100644 --- a/typedapi/types/webhookaction.go +++ b/typedapi/types/webhookaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -34,7 +34,7 @@ import ( // WebhookAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L293-L293 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L293-L293 type WebhookAction struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,19 @@ func (s *WebhookAction) UnmarshalJSON(data []byte) error { // NewWebhookAction returns a WebhookAction. func NewWebhookAction() *WebhookAction { r := &WebhookAction{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// true + +type WebhookActionVariant interface { + WebhookActionCaster() *WebhookAction +} + +func (s *WebhookAction) WebhookActionCaster() *WebhookAction { + return s +} diff --git a/typedapi/types/webhookresult.go b/typedapi/types/webhookresult.go index b591b63ae1..20a7814cbc 100644 --- a/typedapi/types/webhookresult.go +++ b/typedapi/types/webhookresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // WebhookResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/_types/Actions.ts#L295-L298 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/_types/Actions.ts#L295-L298 type WebhookResult struct { Request HttpInputRequestResult `json:"request"` Response *HttpInputResponseResult `json:"response,omitempty"` @@ -34,3 +34,5 @@ func NewWebhookResult() *WebhookResult { return r } + +// false diff --git a/typedapi/types/weightedaverageaggregation.go b/typedapi/types/weightedaverageaggregation.go index 863fc72f0b..a0f49592ec 100644 --- a/typedapi/types/weightedaverageaggregation.go +++ b/typedapi/types/weightedaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -33,7 +33,7 @@ import ( // WeightedAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L449-L463 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L449-L463 type WeightedAverageAggregation struct { // Format A numeric response formatter. 
Format *string `json:"format,omitempty"` @@ -97,3 +97,13 @@ func NewWeightedAverageAggregation() *WeightedAverageAggregation { return r } + +// true + +type WeightedAverageAggregationVariant interface { + WeightedAverageAggregationCaster() *WeightedAverageAggregation +} + +func (s *WeightedAverageAggregation) WeightedAverageAggregationCaster() *WeightedAverageAggregation { + return s +} diff --git a/typedapi/types/weightedaveragevalue.go b/typedapi/types/weightedaveragevalue.go index 5f1fe73dfb..6d18eaab30 100644 --- a/typedapi/types/weightedaveragevalue.go +++ b/typedapi/types/weightedaveragevalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WeightedAverageValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/metric.ts#L465-L475 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/metric.ts#L465-L475 type WeightedAverageValue struct { // Field The field from which to extract the values or weights. 
Field *string `json:"field,omitempty"` @@ -92,3 +92,13 @@ func NewWeightedAverageValue() *WeightedAverageValue { return r } + +// true + +type WeightedAverageValueVariant interface { + WeightedAverageValueCaster() *WeightedAverageValue +} + +func (s *WeightedAverageValue) WeightedAverageValueCaster() *WeightedAverageValue { + return s +} diff --git a/typedapi/types/weightedavgaggregate.go b/typedapi/types/weightedavgaggregate.go index 694b830b26..f01ce721cc 100644 --- a/typedapi/types/weightedavgaggregate.go +++ b/typedapi/types/weightedavgaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WeightedAvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/aggregations/Aggregate.ts#L224-L229 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/aggregations/Aggregate.ts#L224-L229 type WeightedAvgAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewWeightedAvgAggregate() *WeightedAvgAggregate { return r } + +// false diff --git a/typedapi/types/weightedtokensquery.go b/typedapi/types/weightedtokensquery.go index 5b60d356e1..1508172fb3 100644 --- a/typedapi/types/weightedtokensquery.go +++ b/typedapi/types/weightedtokensquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WeightedTokensQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/WeightedTokensQuery.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/WeightedTokensQuery.ts#L25-L33 type WeightedTokensQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -110,8 +110,18 @@ func (s *WeightedTokensQuery) UnmarshalJSON(data []byte) error { // NewWeightedTokensQuery returns a WeightedTokensQuery. func NewWeightedTokensQuery() *WeightedTokensQuery { r := &WeightedTokensQuery{ - Tokens: make(map[string]float32, 0), + Tokens: make(map[string]float32), } return r } + +// true + +type WeightedTokensQueryVariant interface { + WeightedTokensQueryCaster() *WeightedTokensQuery +} + +func (s *WeightedTokensQuery) WeightedTokensQueryCaster() *WeightedTokensQuery { + return s +} diff --git a/typedapi/types/weights.go b/typedapi/types/weights.go index 0c5bd312e8..6f4fe66905 100644 --- a/typedapi/types/weights.go +++ b/typedapi/types/weights.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // Weights type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/put_trained_model/types.ts#L108-L110 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/put_trained_model/types.ts#L108-L110 type Weights struct { Weights Float64 `json:"weights"` } @@ -78,3 +78,13 @@ func NewWeights() *Weights { return r } + +// true + +type WeightsVariant interface { + WeightsCaster() *Weights +} + +func (s *Weights) WeightsCaster() *Weights { + return s +} diff --git a/typedapi/types/whitespaceanalyzer.go b/typedapi/types/whitespaceanalyzer.go index 3b490ee14c..231b8ffaff 100644 --- a/typedapi/types/whitespaceanalyzer.go +++ b/typedapi/types/whitespaceanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -30,7 +30,7 @@ import ( // WhitespaceAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/analyzers.ts#L354-L357 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/analyzers.ts#L421-L425 type WhitespaceAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewWhitespaceAnalyzer() *WhitespaceAnalyzer { return r } + +// true + +type WhitespaceAnalyzerVariant interface { + WhitespaceAnalyzerCaster() *WhitespaceAnalyzer +} + +func (s *WhitespaceAnalyzer) WhitespaceAnalyzerCaster() *WhitespaceAnalyzer { + return s +} diff --git a/typedapi/types/whitespacetokenizer.go b/typedapi/types/whitespacetokenizer.go index 725dead5e4..74cd85937e 100644 --- a/typedapi/types/whitespacetokenizer.go +++ b/typedapi/types/whitespacetokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WhitespaceTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/tokenizers.ts#L135-L138 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/tokenizers.ts#L135-L138 type WhitespaceTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewWhitespaceTokenizer() *WhitespaceTokenizer { return r } + +// true + +type WhitespaceTokenizerVariant interface { + WhitespaceTokenizerCaster() *WhitespaceTokenizer +} + +func (s *WhitespaceTokenizer) WhitespaceTokenizerCaster() *WhitespaceTokenizer { + return s +} diff --git a/typedapi/types/wildcardproperty.go b/typedapi/types/wildcardproperty.go index 287d01525d..d77d1e6202 100644 --- a/typedapi/types/wildcardproperty.go +++ b/typedapi/types/wildcardproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // WildcardProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/mapping/core.ts#L293-L300 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/mapping/core.ts#L309-L316 type WildcardProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,11 +42,12 @@ type WildcardProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *WildcardProperty) UnmarshalJSON(data []byte) error { @@ -117,301 +119,313 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -472,301 +486,313 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -786,6 +812,11 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -800,16 +831,17 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { func (s WildcardProperty) MarshalJSON() ([]byte, error) { type innerWildcardProperty WildcardProperty tmp := innerWildcardProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - 
NullValue: s.NullValue, - Properties: s.Properties, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "wildcard" @@ -820,10 +852,20 @@ func (s WildcardProperty) MarshalJSON() ([]byte, error) { // NewWildcardProperty returns a WildcardProperty. func NewWildcardProperty() *WildcardProperty { r := &WildcardProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type WildcardPropertyVariant interface { + WildcardPropertyCaster() *WildcardProperty +} + +func (s *WildcardProperty) WildcardPropertyCaster() *WildcardProperty { + return s +} diff --git a/typedapi/types/wildcardquery.go b/typedapi/types/wildcardquery.go index 1e0fb75e37..0c2c665d9d 100644 --- a/typedapi/types/wildcardquery.go +++ b/typedapi/types/wildcardquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WildcardQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/term.ts#L305-L325 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/term.ts#L305-L325 type WildcardQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -163,3 +163,13 @@ func NewWildcardQuery() *WildcardQuery { return r } + +// true + +type WildcardQueryVariant interface { + WildcardQueryCaster() *WildcardQuery +} + +func (s *WildcardQuery) WildcardQueryCaster() *WildcardQuery { + return s +} diff --git a/typedapi/types/wktgeobounds.go b/typedapi/types/wktgeobounds.go index 26039ad230..69b1ab2390 100644 --- a/typedapi/types/wktgeobounds.go +++ b/typedapi/types/wktgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WktGeoBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/Geo.ts#L150-L152 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/Geo.ts#L150-L152 type WktGeoBounds struct { Wkt string `json:"wkt"` } @@ -74,3 +74,13 @@ func NewWktGeoBounds() *WktGeoBounds { return r } + +// true + +type WktGeoBoundsVariant interface { + WktGeoBoundsCaster() *WktGeoBounds +} + +func (s *WktGeoBounds) WktGeoBoundsCaster() *WktGeoBounds { + return s +} diff --git a/typedapi/types/worddelimitergraphtokenfilter.go b/typedapi/types/worddelimitergraphtokenfilter.go index a206c01404..143cc5fa17 100644 --- a/typedapi/types/worddelimitergraphtokenfilter.go +++ b/typedapi/types/worddelimitergraphtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WordDelimiterGraphTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L150-L167 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L150-L167 type WordDelimiterGraphTokenFilter struct { AdjustOffsets *bool `json:"adjust_offsets,omitempty"` CatenateAll *bool `json:"catenate_all,omitempty"` @@ -295,3 +295,13 @@ func NewWordDelimiterGraphTokenFilter() *WordDelimiterGraphTokenFilter { return r } + +// true + +type WordDelimiterGraphTokenFilterVariant interface { + WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter +} + +func (s *WordDelimiterGraphTokenFilter) WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter { + return s +} diff --git a/typedapi/types/worddelimitertokenfilter.go b/typedapi/types/worddelimitertokenfilter.go index 1bc9a71187..5bc8399c62 100644 --- a/typedapi/types/worddelimitertokenfilter.go +++ b/typedapi/types/worddelimitertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WordDelimiterTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/analysis/token_filters.ts#L133-L148 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/analysis/token_filters.ts#L133-L148 type WordDelimiterTokenFilter struct { CatenateAll *bool `json:"catenate_all,omitempty"` CatenateNumbers *bool `json:"catenate_numbers,omitempty"` @@ -263,3 +263,13 @@ func NewWordDelimiterTokenFilter() *WordDelimiterTokenFilter { return r } + +// true + +type WordDelimiterTokenFilterVariant interface { + WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter +} + +func (s *WordDelimiterTokenFilter) WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter { + return s +} diff --git a/typedapi/types/wrapperquery.go b/typedapi/types/wrapperquery.go index 88e5025473..75b8584c00 100644 --- a/typedapi/types/wrapperquery.go +++ b/typedapi/types/wrapperquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // WrapperQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_types/query_dsl/abstractions.ts#L501-L510 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/_types/query_dsl/abstractions.ts#L508-L517 type WrapperQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -111,3 +111,13 @@ func NewWrapperQuery() *WrapperQuery { return r } + +// true + +type WrapperQueryVariant interface { + WrapperQueryCaster() *WrapperQuery +} + +func (s *WrapperQuery) WrapperQueryCaster() *WrapperQuery { + return s +} diff --git a/typedapi/types/writeoperation.go b/typedapi/types/writeoperation.go deleted file mode 100644 index 78f198a270..0000000000 --- a/typedapi/types/writeoperation.go +++ /dev/null @@ -1,171 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" -) - -// WriteOperation type. -// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/_global/bulk/types.ts#L109-L128 -type WriteOperation struct { - // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. 
- // If a name matches a dynamic template, then that template will be applied - // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be - // used. - DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` - // Id_ The document ID. - Id_ *string `json:"_id,omitempty"` - IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` - IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. - Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value - // of this parameter. - Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. - RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. 
- Routing *string `json:"routing,omitempty"` - Version *int64 `json:"version,omitempty"` - VersionType *versiontype.VersionType `json:"version_type,omitempty"` -} - -func (s *WriteOperation) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "dynamic_templates": - if s.DynamicTemplates == nil { - s.DynamicTemplates = make(map[string]string, 0) - } - if err := dec.Decode(&s.DynamicTemplates); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - - case "_id": - if err := dec.Decode(&s.Id_); err != nil { - return fmt.Errorf("%s | %w", "Id_", err) - } - - case "if_primary_term": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "IfPrimaryTerm", err) - } - s.IfPrimaryTerm = &value - case float64: - f := int64(v) - s.IfPrimaryTerm = &f - } - - case "if_seq_no": - if err := dec.Decode(&s.IfSeqNo); err != nil { - return fmt.Errorf("%s | %w", "IfSeqNo", err) - } - - case "_index": - if err := dec.Decode(&s.Index_); err != nil { - return fmt.Errorf("%s | %w", "Index_", err) - } - - case "pipeline": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Pipeline", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Pipeline = &o - - case "require_alias": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "RequireAlias", err) - } - s.RequireAlias = &value - case bool: - s.RequireAlias = &v - } - - case "routing": - if err := dec.Decode(&s.Routing); err != nil { - return fmt.Errorf("%s | %w", "Routing", err) - } - - case "version": - if err := dec.Decode(&s.Version); 
err != nil { - return fmt.Errorf("%s | %w", "Version", err) - } - - case "version_type": - if err := dec.Decode(&s.VersionType); err != nil { - return fmt.Errorf("%s | %w", "VersionType", err) - } - - } - } - return nil -} - -// NewWriteOperation returns a WriteOperation. -func NewWriteOperation() *WriteOperation { - r := &WriteOperation{ - DynamicTemplates: make(map[string]string, 0), - } - - return r -} diff --git a/typedapi/types/writesummaryinfo.go b/typedapi/types/writesummaryinfo.go new file mode 100644 index 0000000000..1507b265ee --- /dev/null +++ b/typedapi/types/writesummaryinfo.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WriteSummaryInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L162-L191 +type WriteSummaryInfo struct { + // Count The number of write operations performed in the test. + Count int `json:"count"` + // TotalElapsed The total elapsed time spent on writing blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on writing blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs written in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs written in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle, in nanoseconds. 
+ TotalThrottledNanos int64 `json:"total_throttled_nanos"` +} + +func (s *WriteSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + } + s.TotalThrottledNanos = value + case float64: + f := int64(v) + s.TotalThrottledNanos = f + } + + } + } + return nil +} + +// NewWriteSummaryInfo returns a WriteSummaryInfo. 
+func NewWriteSummaryInfo() *WriteSummaryInfo { + r := &WriteSummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/xpackdatafeed.go b/typedapi/types/xpackdatafeed.go index b8a207b437..bf888c2691 100644 --- a/typedapi/types/xpackdatafeed.go +++ b/typedapi/types/xpackdatafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackDatafeed type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L75-L77 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L74-L76 type XpackDatafeed struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewXpackDatafeed() *XpackDatafeed { return r } + +// false diff --git a/typedapi/types/xpackfeature.go b/typedapi/types/xpackfeature.go index a8e6d33f32..767f453f1f 100644 --- a/typedapi/types/xpackfeature.go +++ b/typedapi/types/xpackfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/types.ts#L85-L90 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/types.ts#L84-L89 type XpackFeature struct { Available bool `json:"available"` Description *string `json:"description,omitempty"` @@ -110,3 +110,5 @@ func NewXpackFeature() *XpackFeature { return r } + +// false diff --git a/typedapi/types/xpackfeatures.go b/typedapi/types/xpackfeatures.go index a0ebf90247..393c3e253c 100644 --- a/typedapi/types/xpackfeatures.go +++ b/typedapi/types/xpackfeatures.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types // XpackFeatures type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/types.ts#L42-L83 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/types.ts#L42-L82 type XpackFeatures struct { AggregateMetric XpackFeature `json:"aggregate_metric"` Analytics XpackFeature `json:"analytics"` @@ -34,7 +34,6 @@ type XpackFeatures struct { EnterpriseSearch XpackFeature `json:"enterprise_search"` Eql XpackFeature `json:"eql"` Esql XpackFeature `json:"esql"` - FrozenIndices XpackFeature `json:"frozen_indices"` Graph XpackFeature `json:"graph"` Ilm XpackFeature `json:"ilm"` Logsdb XpackFeature `json:"logsdb"` @@ -60,3 +59,5 @@ func NewXpackFeatures() *XpackFeatures { return r } + +// false diff --git a/typedapi/types/xpackquery.go b/typedapi/types/xpackquery.go index 1383f53cf9..73e59760a4 100644 --- a/typedapi/types/xpackquery.go +++ b/typedapi/types/xpackquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L257-L262 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L269-L274 type XpackQuery struct { Count *int `json:"count,omitempty"` Failed *int `json:"failed,omitempty"` @@ -129,3 +129,5 @@ func NewXpackQuery() *XpackQuery { return r } + +// false diff --git a/typedapi/types/xpackrealm.go b/typedapi/types/xpackrealm.go index 1ef00b5291..5324a240e1 100644 --- a/typedapi/types/xpackrealm.go +++ b/typedapi/types/xpackrealm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackRealm type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L415-L424 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L423-L432 type XpackRealm struct { Available bool `json:"available"` Cache []RealmCache `json:"cache,omitempty"` @@ -139,3 +139,5 @@ func NewXpackRealm() *XpackRealm { return r } + +// false diff --git a/typedapi/types/xpackrolemapping.go b/typedapi/types/xpackrolemapping.go index 7935aa51df..e014d3a69f 100644 --- a/typedapi/types/xpackrolemapping.go +++ b/typedapi/types/xpackrolemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackRoleMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L268-L271 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L280-L283 type XpackRoleMapping struct { Enabled int `json:"enabled"` Size int `json:"size"` @@ -95,3 +95,5 @@ func NewXpackRoleMapping() *XpackRoleMapping { return r } + +// false diff --git a/typedapi/types/xpackruntimefieldtypes.go b/typedapi/types/xpackruntimefieldtypes.go index 8b157631ff..ba7f842ce4 100644 --- a/typedapi/types/xpackruntimefieldtypes.go +++ b/typedapi/types/xpackruntimefieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // XpackRuntimeFieldTypes type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/types.ts#L273-L275 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/types.ts#L285-L287 type XpackRuntimeFieldTypes struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewXpackRuntimeFieldTypes() *XpackRuntimeFieldTypes { return r } + +// false diff --git a/typedapi/types/zeroshotclassificationinferenceoptions.go b/typedapi/types/zeroshotclassificationinferenceoptions.go index 255c4a8ef7..c97a358c23 100644 --- a/typedapi/types/zeroshotclassificationinferenceoptions.go +++ b/typedapi/types/zeroshotclassificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ZeroShotClassificationInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L201-L222 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L185-L206 type ZeroShotClassificationInferenceOptions struct { // ClassificationLabels The zero shot classification labels indicating entailment, neutral, and // contradiction @@ -129,3 +129,13 @@ func NewZeroShotClassificationInferenceOptions() *ZeroShotClassificationInferenc return r } + +// true + +type ZeroShotClassificationInferenceOptionsVariant interface { + ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions +} + +func (s *ZeroShotClassificationInferenceOptions) ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/zeroshotclassificationinferenceupdateoptions.go b/typedapi/types/zeroshotclassificationinferenceupdateoptions.go index 6efa50a0df..989811d206 100644 --- a/typedapi/types/zeroshotclassificationinferenceupdateoptions.go +++ b/typedapi/types/zeroshotclassificationinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package types @@ -31,7 +31,7 @@ import ( // ZeroShotClassificationInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/ml/_types/inference.ts#L374-L383 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/ml/_types/inference.ts#L362-L371 type ZeroShotClassificationInferenceUpdateOptions struct { // Labels The labels to predict. Labels []string `json:"labels"` @@ -107,3 +107,13 @@ func NewZeroShotClassificationInferenceUpdateOptions() *ZeroShotClassificationIn return r } + +// true + +type ZeroShotClassificationInferenceUpdateOptionsVariant interface { + ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions +} + +func (s *ZeroShotClassificationInferenceUpdateOptions) ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions { + return s +} diff --git a/typedapi/watcher/ackwatch/ack_watch.go b/typedapi/watcher/ackwatch/ack_watch.go index ca67f0af19..d8b8d91289 100644 --- a/typedapi/watcher/ackwatch/ack_watch.go +++ b/typedapi/watcher/ackwatch/ack_watch.go @@ -16,10 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Acknowledges a watch, manually throttling the execution of the watch's -// actions. +// Acknowledge a watch. +// Acknowledging a watch enables you to manually throttle the execution of the +// watch's actions. +// +// The acknowledgement state of an action is stored in the +// `status.actions..ack.state` structure. +// +// IMPORTANT: If the specified watch is currently being executed, this API will +// return an error +// The reason for this behavior is to prevent overwriting the watch status from +// a watch execution. 
+// +// Acknowledging an action throttles further executions of that action until its +// `ack.state` is reset to `awaits_successful_execution`. +// This happens when the condition of the watch is not met (the condition +// evaluates to false). package ackwatch import ( @@ -80,10 +94,24 @@ func NewAckWatchFunc(tp elastictransport.Interface) NewAckWatch { } } -// Acknowledges a watch, manually throttling the execution of the watch's -// actions. +// Acknowledge a watch. +// Acknowledging a watch enables you to manually throttle the execution of the +// watch's actions. +// +// The acknowledgement state of an action is stored in the +// `status.actions..ack.state` structure. +// +// IMPORTANT: If the specified watch is currently being executed, this API will +// return an error +// The reason for this behavior is to prevent overwriting the watch status from +// a watch execution. +// +// Acknowledging an action throttles further executions of that action until its +// `ack.state` is reset to `awaits_successful_execution`. +// This happens when the condition of the watch is not met (the condition +// evaluates to false). // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch func New(tp elastictransport.Interface) *AckWatch { r := &AckWatch{ transport: tp, @@ -318,7 +346,7 @@ func (r *AckWatch) Header(key, value string) *AckWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *AckWatch) _watchid(watchid string) *AckWatch { r.paramSet |= watchidMask @@ -327,7 +355,8 @@ func (r *AckWatch) _watchid(watchid string) *AckWatch { return r } -// ActionId A comma-separated list of the action ids to be acked +// ActionId A comma-separated list of the action identifiers to acknowledge. +// If you omit this parameter, all of the actions of the watch are acknowledged. 
// API Name: actionid func (r *AckWatch) ActionId(actionid string) *AckWatch { r.paramSet |= actionidMask diff --git a/typedapi/watcher/ackwatch/response.go b/typedapi/watcher/ackwatch/response.go index 0383430949..73d272c2a7 100644 --- a/typedapi/watcher/ackwatch/response.go +++ b/typedapi/watcher/ackwatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package ackwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package ackwatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 type Response struct { Status types.WatchStatus `json:"status"` } diff --git a/typedapi/watcher/activatewatch/activate_watch.go b/typedapi/watcher/activatewatch/activate_watch.go index e759b1c3a6..a00594d53f 100644 --- a/typedapi/watcher/activatewatch/activate_watch.go +++ b/typedapi/watcher/activatewatch/activate_watch.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Activates a currently inactive watch. +// Activate a watch. +// A watch can be either active or inactive. 
package activatewatch import ( @@ -76,9 +77,10 @@ func NewActivateWatchFunc(tp elastictransport.Interface) NewActivateWatch { } } -// Activates a currently inactive watch. +// Activate a watch. +// A watch can be either active or inactive. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch func New(tp elastictransport.Interface) *ActivateWatch { r := &ActivateWatch{ transport: tp, @@ -292,7 +294,7 @@ func (r *ActivateWatch) Header(key, value string) *ActivateWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *ActivateWatch) _watchid(watchid string) *ActivateWatch { r.paramSet |= watchidMask diff --git a/typedapi/watcher/activatewatch/response.go b/typedapi/watcher/activatewatch/response.go index d30062de83..9ae3eb1a6c 100644 --- a/typedapi/watcher/activatewatch/response.go +++ b/typedapi/watcher/activatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package activatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package activatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` } diff --git a/typedapi/watcher/deactivatewatch/deactivate_watch.go b/typedapi/watcher/deactivatewatch/deactivate_watch.go index e061b5593d..fb32f5fa49 100644 --- a/typedapi/watcher/deactivatewatch/deactivate_watch.go +++ b/typedapi/watcher/deactivatewatch/deactivate_watch.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Deactivates a currently active watch. +// Deactivate a watch. +// A watch can be either active or inactive. package deactivatewatch import ( @@ -76,9 +77,10 @@ func NewDeactivateWatchFunc(tp elastictransport.Interface) NewDeactivateWatch { } } -// Deactivates a currently active watch. +// Deactivate a watch. +// A watch can be either active or inactive. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch func New(tp elastictransport.Interface) *DeactivateWatch { r := &DeactivateWatch{ transport: tp, @@ -292,7 +294,7 @@ func (r *DeactivateWatch) Header(key, value string) *DeactivateWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *DeactivateWatch) _watchid(watchid string) *DeactivateWatch { r.paramSet |= watchidMask diff --git a/typedapi/watcher/deactivatewatch/response.go b/typedapi/watcher/deactivatewatch/response.go index d3088faee1..62cf3d76b3 100644 --- a/typedapi/watcher/deactivatewatch/response.go +++ b/typedapi/watcher/deactivatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deactivatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deactivatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` } diff --git a/typedapi/watcher/deletewatch/delete_watch.go b/typedapi/watcher/deletewatch/delete_watch.go index 22f79cbb73..0d0e7c1d5d 100644 --- a/typedapi/watcher/deletewatch/delete_watch.go +++ b/typedapi/watcher/deletewatch/delete_watch.go @@ -16,9 +16,20 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Removes a watch from Watcher. +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. package deletewatch import ( @@ -76,9 +87,20 @@ func NewDeleteWatchFunc(tp elastictransport.Interface) NewDeleteWatch { } } -// Removes a watch from Watcher. +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch func New(tp elastictransport.Interface) *DeleteWatch { r := &DeleteWatch{ transport: tp, @@ -290,7 +312,7 @@ func (r *DeleteWatch) Header(key, value string) *DeleteWatch { return r } -// Id Watch ID +// Id The watch identifier. // API Name: id func (r *DeleteWatch) _id(id string) *DeleteWatch { r.paramSet |= idMask diff --git a/typedapi/watcher/deletewatch/response.go b/typedapi/watcher/deletewatch/response.go index a045ebd5fa..613c2c938d 100644 --- a/typedapi/watcher/deletewatch/response.go +++ b/typedapi/watcher/deletewatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package deletewatch // Response holds the response body struct for the package deletewatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/executewatch/execute_watch.go b/typedapi/watcher/executewatch/execute_watch.go index 99ef0c21c8..a6152e827c 100644 --- a/typedapi/watcher/executewatch/execute_watch.go +++ b/typedapi/watcher/executewatch/execute_watch.go @@ -16,15 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b +// Run a watch. // This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. +// // For testing and debugging purposes, you also have fine-grained control on how -// the watch runs. You can execute the watch without executing all of its -// actions or alternatively by simulating them. You can also force execution by -// ignoring the watch condition and control whether a watch record would be -// written to the watch history after execution. +// the watch runs. +// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. +// +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. +// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. package executewatch import ( @@ -86,15 +103,32 @@ func NewExecuteWatchFunc(tp elastictransport.Interface) NewExecuteWatch { } } +// Run a watch. 
// This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. +// // For testing and debugging purposes, you also have fine-grained control on how -// the watch runs. You can execute the watch without executing all of its -// actions or alternatively by simulating them. You can also force execution by -// ignoring the watch condition and control whether a watch record would be -// written to the watch history after execution. +// the watch runs. +// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. +// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch func New(tp elastictransport.Interface) *ExecuteWatch { r := &ExecuteWatch{ transport: tp, @@ -102,8 +136,6 @@ func New(tp elastictransport.Interface) *ExecuteWatch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -328,7 +360,7 @@ func (r *ExecuteWatch) Header(key, value string) *ExecuteWatch { return r } -// Id Identifier for the watch. +// Id The watch identifier. // API Name: id func (r *ExecuteWatch) Id(id string) *ExecuteWatch { r.paramSet |= idMask @@ -389,69 +421,135 @@ func (r *ExecuteWatch) Pretty(pretty bool) *ExecuteWatch { return r } -// ActionModes Determines how to handle the watch actions as part of the watch execution. +// Determines how to handle the watch actions as part of the watch execution. // API name: action_modes func (r *ExecuteWatch) ActionModes(actionmodes map[string]actionexecutionmode.ActionExecutionMode) *ExecuteWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ActionModes = actionmodes + return r +} + +func (r *ExecuteWatch) AddActionMode(key string, value actionexecutionmode.ActionExecutionMode) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]actionexecutionmode.ActionExecutionMode + if r.req.ActionModes == nil { + r.req.ActionModes = make(map[string]actionexecutionmode.ActionExecutionMode) + } else { + tmp = r.req.ActionModes + } + tmp[key] = value + + r.req.ActionModes = tmp return r } -// AlternativeInput When present, the watch uses this object as a payload instead of executing +// When present, the watch uses this object as a payload instead of executing // its own input. 
// API name: alternative_input func (r *ExecuteWatch) AlternativeInput(alternativeinput map[string]json.RawMessage) *ExecuteWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AlternativeInput = alternativeinput + return r +} + +func (r *ExecuteWatch) AddAlternativeInput(key string, value json.RawMessage) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.AlternativeInput == nil { + r.req.AlternativeInput = make(map[string]json.RawMessage) + } else { + tmp = r.req.AlternativeInput + } + + tmp[key] = value + r.req.AlternativeInput = tmp return r } -// IgnoreCondition When set to `true`, the watch execution uses the always condition. This can +// When set to `true`, the watch execution uses the always condition. This can // also be specified as an HTTP parameter. // API name: ignore_condition func (r *ExecuteWatch) IgnoreCondition(ignorecondition bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreCondition = &ignorecondition return r } -// RecordExecution When set to `true`, the watch record representing the watch execution result -// is persisted to the `.watcher-history` index for the current time. In -// addition, the status of the watch is updated, possibly throttling subsequent -// executions. This can also be specified as an HTTP parameter. +// When set to `true`, the watch record representing the watch execution result +// is persisted to the `.watcher-history` index for the current time. +// In addition, the status of the watch is updated, possibly throttling +// subsequent runs. +// This can also be specified as an HTTP parameter. 
// API name: record_execution func (r *ExecuteWatch) RecordExecution(recordexecution bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RecordExecution = &recordexecution return r } // API name: simulated_actions -func (r *ExecuteWatch) SimulatedActions(simulatedactions *types.SimulatedActions) *ExecuteWatch { +func (r *ExecuteWatch) SimulatedActions(simulatedactions types.SimulatedActionsVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.SimulatedActions = simulatedactions + r.req.SimulatedActions = simulatedactions.SimulatedActionsCaster() return r } -// TriggerData This structure is parsed as the data of the trigger event that will be used -// during the watch execution +// This structure is parsed as the data of the trigger event that will be used +// during the watch execution. // API name: trigger_data -func (r *ExecuteWatch) TriggerData(triggerdata *types.ScheduleTriggerEvent) *ExecuteWatch { +func (r *ExecuteWatch) TriggerData(triggerdata types.ScheduleTriggerEventVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.TriggerData = triggerdata + r.req.TriggerData = triggerdata.ScheduleTriggerEventCaster() return r } -// Watch When present, this watch is used instead of the one specified in the request. -// This watch is not persisted to the index and record_execution cannot be set. +// When present, this watch is used instead of the one specified in the request. +// This watch is not persisted to the index and `record_execution` cannot be +// set. 
// API name: watch -func (r *ExecuteWatch) Watch(watch *types.Watch) *ExecuteWatch { +func (r *ExecuteWatch) Watch(watch types.WatchVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Watch = watch + r.req.Watch = watch.WatchCaster() return r } diff --git a/typedapi/watcher/executewatch/request.go b/typedapi/watcher/executewatch/request.go index f23c218a41..6b49067c47 100644 --- a/typedapi/watcher/executewatch/request.go +++ b/typedapi/watcher/executewatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package executewatch @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L105 type Request struct { // ActionModes Determines how to handle the watch actions as part of the watch execution. @@ -42,16 +42,18 @@ type Request struct { // also be specified as an HTTP parameter. IgnoreCondition *bool `json:"ignore_condition,omitempty"` // RecordExecution When set to `true`, the watch record representing the watch execution result - // is persisted to the `.watcher-history` index for the current time. In - // addition, the status of the watch is updated, possibly throttling subsequent - // executions. This can also be specified as an HTTP parameter. 
+ // is persisted to the `.watcher-history` index for the current time. + // In addition, the status of the watch is updated, possibly throttling + // subsequent runs. + // This can also be specified as an HTTP parameter. RecordExecution *bool `json:"record_execution,omitempty"` SimulatedActions *types.SimulatedActions `json:"simulated_actions,omitempty"` // TriggerData This structure is parsed as the data of the trigger event that will be used - // during the watch execution + // during the watch execution. TriggerData *types.ScheduleTriggerEvent `json:"trigger_data,omitempty"` // Watch When present, this watch is used instead of the one specified in the request. - // This watch is not persisted to the index and record_execution cannot be set. + // This watch is not persisted to the index and `record_execution` cannot be + // set. Watch *types.Watch `json:"watch,omitempty"` } diff --git a/typedapi/watcher/executewatch/response.go b/typedapi/watcher/executewatch/response.go index 8bc3274875..ed72ec2fec 100644 --- a/typedapi/watcher/executewatch/response.go +++ b/typedapi/watcher/executewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package executewatch @@ -26,9 +26,14 @@ import ( // Response holds the response body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L34 type Response struct { - Id_ string `json:"_id"` + + // Id_ The watch record identifier as it would be stored in the `.watcher-history` + // index. + Id_ string `json:"_id"` + // WatchRecord The watch record document as it would be stored in the `.watcher-history` + // index. WatchRecord types.WatchRecord `json:"watch_record"` } diff --git a/typedapi/watcher/getsettings/get_settings.go b/typedapi/watcher/getsettings/get_settings.go index d96710f83e..b4bff4810a 100644 --- a/typedapi/watcher/getsettings/get_settings.go +++ b/typedapi/watcher/getsettings/get_settings.go @@ -16,21 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieve settings for the watcher system index +// Get Watcher index settings. +// Get settings for the Watcher internal index (`.watches`). +// Only a subset of settings are shown, for example `index.auto_expand_replicas` +// and `index.number_of_replicas`. 
package getsettings import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,9 +71,12 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Retrieve settings for the watcher system index +// Get Watcher index settings. +// Get settings for the Watcher internal index (`.watches`). +// Only a subset of settings are shown, for example `index.auto_expand_replicas` +// and `index.number_of_replicas`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings func New(tp elastictransport.Interface) *GetSettings { r := &GetSettings{ transport: tp, @@ -180,8 +189,57 @@ func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error } // Do runs the request through the transport, handle the response and returns a getsettings.Response -func (r GetSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -229,3 +287,57 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/watcher/getsettings/response.go b/typedapi/watcher/getsettings/response.go new file mode 100644 index 0000000000..b7b00f89bf --- /dev/null +++ b/typedapi/watcher/getsettings/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/get_settings/WatcherGetSettingsResponse.ts#L22-L26 +type Response struct { + Index types.IndexSettings `json:"index"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/watcher/getwatch/get_watch.go b/typedapi/watcher/getwatch/get_watch.go index 268bb48b4f..757318c0bf 100644 --- a/typedapi/watcher/getwatch/get_watch.go +++ b/typedapi/watcher/getwatch/get_watch.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves a watch by its ID. +// Get a watch. package getwatch import ( @@ -76,9 +76,9 @@ func NewGetWatchFunc(tp elastictransport.Interface) NewGetWatch { } } -// Retrieves a watch by its ID. +// Get a watch. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch func New(tp elastictransport.Interface) *GetWatch { r := &GetWatch{ transport: tp, @@ -290,7 +290,7 @@ func (r *GetWatch) Header(key, value string) *GetWatch { return r } -// Id Watch ID +// Id The watch identifier. 
// API Name: id func (r *GetWatch) _id(id string) *GetWatch { r.paramSet |= idMask diff --git a/typedapi/watcher/getwatch/response.go b/typedapi/watcher/getwatch/response.go index 7ef47c2af1..08339785c5 100644 --- a/typedapi/watcher/getwatch/response.go +++ b/typedapi/watcher/getwatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package getwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getwatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/putwatch/put_watch.go b/typedapi/watcher/putwatch/put_watch.go index 7569f23d90..214f875ecc 100644 --- a/typedapi/watcher/putwatch/put_watch.go +++ b/typedapi/watcher/putwatch/put_watch.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Creates a new watch, or updates an existing one. +// Create or update a watch. +// When a watch is registered, a new document that represents the watch is added +// to the `.watches` index and its trigger is immediately registered with the +// relevant trigger engine. 
+// Typically for the `schedule` trigger, the scheduler is the trigger engine. +// +// IMPORTANT: You must use Kibana or this API to create a watch. +// Do not add a watch directly to the `.watches` index by using the +// Elasticsearch index API. +// If Elasticsearch security features are enabled, do not give users write +// privileges on the `.watches` index. +// +// When you add a watch you can also define its initial active state by setting +// the *active* parameter. +// +// When Elasticsearch security features are enabled, your watch can index or +// search only on indices for which the user that stored the watch has +// privileges. +// If the user is able to read index `a`, but not index `b`, the same will apply +// when the watch runs. package putwatch import ( @@ -81,9 +100,28 @@ func NewPutWatchFunc(tp elastictransport.Interface) NewPutWatch { } } -// Creates a new watch, or updates an existing one. +// Create or update a watch. +// When a watch is registered, a new document that represents the watch is added +// to the `.watches` index and its trigger is immediately registered with the +// relevant trigger engine. +// Typically for the `schedule` trigger, the scheduler is the trigger engine. +// +// IMPORTANT: You must use Kibana or this API to create a watch. +// Do not add a watch directly to the `.watches` index by using the +// Elasticsearch index API. +// If Elasticsearch security features are enabled, do not give users write +// privileges on the `.watches` index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html +// When you add a watch you can also define its initial active state by setting +// the *active* parameter. +// +// When Elasticsearch security features are enabled, your watch can index or +// search only on indices for which the user that stored the watch has +// privileges. +// If the user is able to read index `a`, but not index `b`, the same will apply +// when the watch runs. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch func New(tp elastictransport.Interface) *PutWatch { r := &PutWatch{ transport: tp, @@ -91,8 +129,6 @@ func New(tp elastictransport.Interface) *PutWatch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +342,7 @@ func (r *PutWatch) Header(key, value string) *PutWatch { return r } -// Id Watch ID +// Id The identifier for the watch. // API Name: id func (r *PutWatch) _id(id string) *PutWatch { r.paramSet |= idMask @@ -315,7 +351,8 @@ func (r *PutWatch) _id(id string) *PutWatch { return r } -// Active Specify whether the watch is in/active by default +// Active The initial state of the watch. +// The default value is `true`, which means the watch is active by default. // API name: active func (r *PutWatch) Active(active bool) *PutWatch { r.values.Set("active", strconv.FormatBool(active)) @@ -393,57 +430,131 @@ func (r *PutWatch) Pretty(pretty bool) *PutWatch { return r } +// The list of actions that will be run if the condition matches. // API name: actions func (r *PutWatch) Actions(actions map[string]types.WatcherAction) *PutWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Actions = actions + return r +} + +func (r *PutWatch) AddAction(key string, value types.WatcherActionVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.WatcherAction + if r.req.Actions == nil { + r.req.Actions = make(map[string]types.WatcherAction) + } else { + tmp = r.req.Actions + } + + tmp[key] = *value.WatcherActionCaster() + r.req.Actions = tmp return r } +// The condition that defines if the actions should be run. 
// API name: condition -func (r *PutWatch) Condition(condition *types.WatcherCondition) *PutWatch { +func (r *PutWatch) Condition(condition types.WatcherConditionVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Condition = condition + r.req.Condition = condition.WatcherConditionCaster() return r } +// The input that defines the input that loads the data for the watch. // API name: input -func (r *PutWatch) Input(input *types.WatcherInput) *PutWatch { +func (r *PutWatch) Input(input types.WatcherInputVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Input = input + r.req.Input = input.WatcherInputCaster() return r } +// Metadata JSON that will be copied into the history entries. // API name: metadata -func (r *PutWatch) Metadata(metadata types.Metadata) *PutWatch { - r.req.Metadata = metadata +func (r *PutWatch) Metadata(metadata types.MetadataVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// The minimum time between actions being run. +// The default is 5 seconds. +// This default can be changed in the config file with the setting +// `xpack.watcher.throttle.period.default_period`. +// If both this value and the `throttle_period_in_millis` parameter are +// specified, Watcher uses the last parameter included in the request. 
// API name: throttle_period -func (r *PutWatch) ThrottlePeriod(throttleperiod string) *PutWatch { +func (r *PutWatch) ThrottlePeriod(duration types.DurationVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ThrottlePeriod = &throttleperiod + r.req.ThrottlePeriod = *duration.DurationCaster() return r } +// Minimum time in milliseconds between actions being run. Defaults to 5000. If +// both this value and the throttle_period parameter are specified, Watcher uses +// the last parameter included in the request. +// API name: throttle_period_in_millis +func (r *PutWatch) ThrottlePeriodInMillis(durationvalueunitmillis int64) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ThrottlePeriodInMillis = &durationvalueunitmillis + + return r +} + +// The transform that processes the watch payload to prepare it for the watch +// actions. // API name: transform -func (r *PutWatch) Transform(transform *types.TransformContainer) *PutWatch { +func (r *PutWatch) Transform(transform types.TransformContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Transform = transform + r.req.Transform = transform.TransformContainerCaster() return r } +// The trigger that defines when the watch should run. 
// API name: trigger -func (r *PutWatch) Trigger(trigger *types.TriggerContainer) *PutWatch { +func (r *PutWatch) Trigger(trigger types.TriggerContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Trigger = trigger + r.req.Trigger = trigger.TriggerContainerCaster() return r } diff --git a/typedapi/watcher/putwatch/request.go b/typedapi/watcher/putwatch/request.go index 0f19eced82..146fbd0c91 100644 --- a/typedapi/watcher/putwatch/request.go +++ b/typedapi/watcher/putwatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putwatch @@ -26,22 +26,39 @@ import ( "errors" "fmt" "io" - "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L30-L53 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L31-L110 type Request struct { - Actions map[string]types.WatcherAction `json:"actions,omitempty"` - Condition *types.WatcherCondition `json:"condition,omitempty"` - Input *types.WatcherInput `json:"input,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - ThrottlePeriod *string `json:"throttle_period,omitempty"` - Transform *types.TransformContainer `json:"transform,omitempty"` - Trigger *types.TriggerContainer `json:"trigger,omitempty"` + + // Actions The list of actions that will be run if the condition matches. 
+ Actions map[string]types.WatcherAction `json:"actions,omitempty"` + // Condition The condition that defines if the actions should be run. + Condition *types.WatcherCondition `json:"condition,omitempty"` + // Input The input that defines the input that loads the data for the watch. + Input *types.WatcherInput `json:"input,omitempty"` + // Metadata Metadata JSON that will be copied into the history entries. + Metadata types.Metadata `json:"metadata,omitempty"` + // ThrottlePeriod The minimum time between actions being run. + // The default is 5 seconds. + // This default can be changed in the config file with the setting + // `xpack.watcher.throttle.period.default_period`. + // If both this value and the `throttle_period_in_millis` parameter are + // specified, Watcher uses the last parameter included in the request. + ThrottlePeriod types.Duration `json:"throttle_period,omitempty"` + // ThrottlePeriodInMillis Minimum time in milliseconds between actions being run. Defaults to 5000. If + // both this value and the throttle_period parameter are specified, Watcher uses + // the last parameter included in the request. + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + // Transform The transform that processes the watch payload to prepare it for the watch + // actions. + Transform *types.TransformContainer `json:"transform,omitempty"` + // Trigger The trigger that defines when the watch should run. 
+ Trigger *types.TriggerContainer `json:"trigger,omitempty"` } // NewRequest returns a Request @@ -103,16 +120,14 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "throttle_period": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.ThrottlePeriod); err != nil { return fmt.Errorf("%s | %w", "ThrottlePeriod", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriodInMillis", err) } - s.ThrottlePeriod = &o case "transform": if err := dec.Decode(&s.Transform); err != nil { diff --git a/typedapi/watcher/putwatch/response.go b/typedapi/watcher/putwatch/response.go index cbdba8ce92..6f06fe4069 100644 --- a/typedapi/watcher/putwatch/response.go +++ b/typedapi/watcher/putwatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package putwatch // Response holds the response body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 type Response struct { Created bool `json:"created"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/querywatches/query_watches.go b/typedapi/watcher/querywatches/query_watches.go index 3bc2c8f30b..6c365ea1ab 100644 --- a/typedapi/watcher/querywatches/query_watches.go +++ b/typedapi/watcher/querywatches/query_watches.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves stored watches. +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. +// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. package querywatches import ( @@ -73,9 +77,13 @@ func NewQueryWatchesFunc(tp elastictransport.Interface) NewQueryWatches { } } -// Retrieves stored watches. +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. +// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches func New(tp elastictransport.Interface) *QueryWatches { r := &QueryWatches{ transport: tp, @@ -83,8 +91,6 @@ func New(tp elastictransport.Interface) *QueryWatches { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,43 +344,74 @@ func (r *QueryWatches) Pretty(pretty bool) *QueryWatches { return r } -// From The offset from the first result to fetch. Needs to be non-negative. +// The offset from the first result to fetch. +// It must be non-negative. // API name: from func (r *QueryWatches) From(from int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query Optional, query filter watches to be returned. +// A query that filters the watches to be returned. // API name: query -func (r *QueryWatches) Query(query *types.Query) *QueryWatches { +func (r *QueryWatches) Query(query types.QueryVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// SearchAfter Optional search After to do pagination using last hit’s sort values. +// Retrieve the next page of hits using a set of sort values from the previous +// page. 
// API name: search_after -func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValue) *QueryWatches { - r.req.SearchAfter = sortresults +func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValueVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. Needs to be non-negative. +// The number of hits to return. +// It must be non-negative. // API name: size func (r *QueryWatches) Size(size int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Optional sort definition. +// One or more fields used to sort the search results. // API name: sort -func (r *QueryWatches) Sort(sorts ...types.SortCombinations) *QueryWatches { - r.req.Sort = sorts +func (r *QueryWatches) Sort(sorts ...types.SortCombinationsVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/watcher/querywatches/request.go b/typedapi/watcher/querywatches/request.go index b779cdf16d..62665e565e 100644 --- a/typedapi/watcher/querywatches/request.go +++ b/typedapi/watcher/querywatches/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package querywatches @@ -33,18 +33,21 @@ import ( // Request holds the request body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L70 type Request struct { - // From The offset from the first result to fetch. Needs to be non-negative. + // From The offset from the first result to fetch. + // It must be non-negative. From *int `json:"from,omitempty"` - // Query Optional, query filter watches to be returned. + // Query A query that filters the watches to be returned. Query *types.Query `json:"query,omitempty"` - // SearchAfter Optional search After to do pagination using last hit’s sort values. + // SearchAfter Retrieve the next page of hits using a set of sort values from the previous + // page. SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // Size The number of hits to return. Needs to be non-negative. + // Size The number of hits to return. + // It must be non-negative. Size *int `json:"size,omitempty"` - // Sort Optional sort definition. + // Sort One or more fields used to sort the search results. Sort []types.SortCombinations `json:"sort,omitempty"` } diff --git a/typedapi/watcher/querywatches/response.go b/typedapi/watcher/querywatches/response.go index 5fdd653efa..9b94191992 100644 --- a/typedapi/watcher/querywatches/response.go +++ b/typedapi/watcher/querywatches/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package querywatches @@ -26,9 +26,13 @@ import ( // Response holds the response body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of watches found. + Count int `json:"count"` + // Watches A list of watches based on the `from`, `size`, or `search_after` request body + // parameters. Watches []types.QueryWatch `json:"watches"` } diff --git a/typedapi/watcher/start/response.go b/typedapi/watcher/start/response.go index c8fe513923..7acc4bd19b 100644 --- a/typedapi/watcher/start/response.go +++ b/typedapi/watcher/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/start/WatcherStartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/start/WatcherStartResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/watcher/start/start.go b/typedapi/watcher/start/start.go index 554cc563b3..3e902cd7a4 100644 --- a/typedapi/watcher/start/start.go +++ b/typedapi/watcher/start/start.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Starts Watcher if it is not already running. +// Start the watch service. +// Start the Watcher service if it is not already running. package start import ( @@ -68,9 +69,10 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Starts Watcher if it is not already running. +// Start the watch service. +// Start the Watcher service if it is not already running. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start func New(tp elastictransport.Interface) *Start { r := &Start{ transport: tp, @@ -276,6 +278,14 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/watcher/stats/response.go b/typedapi/watcher/stats/response.go index b11f3ea4de..158394cd0c 100644 --- a/typedapi/watcher/stats/response.go +++ b/typedapi/watcher/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 type Response struct { ClusterName string `json:"cluster_name"` ManuallyStopped bool `json:"manually_stopped"` diff --git a/typedapi/watcher/stats/stats.go b/typedapi/watcher/stats/stats.go index d4ab278957..1a56603b52 100644 --- a/typedapi/watcher/stats/stats.go +++ b/typedapi/watcher/stats/stats.go @@ -16,9 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Retrieves the current Watcher metrics. +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. package stats import ( @@ -74,9 +76,11 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Retrieves the current Watcher metrics. +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, diff --git a/typedapi/watcher/stop/response.go b/typedapi/watcher/stop/response.go index 519243100f..472cff8772 100644 --- a/typedapi/watcher/stop/response.go +++ b/typedapi/watcher/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/watcher/stop/stop.go b/typedapi/watcher/stop/stop.go index 84a8460e04..29dc71a757 100644 --- a/typedapi/watcher/stop/stop.go +++ b/typedapi/watcher/stop/stop.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Stops Watcher if it is running. +// Stop the watch service. +// Stop the Watcher service if it is running. package stop import ( @@ -68,9 +69,10 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Stops Watcher if it is running. +// Stop the watch service. +// Stop the Watcher service if it is running. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop func New(tp elastictransport.Interface) *Stop { r := &Stop{ transport: tp, @@ -276,6 +278,17 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout The period to wait for the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/watcher/updatesettings/request.go b/typedapi/watcher/updatesettings/request.go new file mode 100644 index 0000000000..c1f70e5728 --- /dev/null +++ b/typedapi/watcher/updatesettings/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatesettings + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/update_settings/WatcherUpdateSettingsRequest.ts#L24-L58 +type Request struct { + IndexAutoExpandReplicas *string `json:"index.auto_expand_replicas,omitempty"` + IndexNumberOfReplicas *int `json:"index.number_of_replicas,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/watcher/updatesettings/response.go b/typedapi/watcher/updatesettings/response.go new file mode 100644 index 0000000000..1abd70b767 --- /dev/null +++ b/typedapi/watcher/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/watcher/update_settings/WatcherUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/watcher/updatesettings/update_settings.go b/typedapi/watcher/updatesettings/update_settings.go index 6020f42e53..6138c6cc1a 100644 --- a/typedapi/watcher/updatesettings/update_settings.go +++ b/typedapi/watcher/updatesettings/update_settings.go @@ -16,21 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Update settings for the watcher system index +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas` and `index.number_of_replicas`. package updatesettings import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -45,6 +52,10 @@ type UpdateSettings struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,14 +76,19 @@ func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { } } -// Update settings for the watcher system index +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas` and `index.number_of_replicas`. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings func New(tp elastictransport.Interface) *UpdateSettings { r := &UpdateSettings{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +100,21 @@ func New(tp elastictransport.Interface) *UpdateSettings { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +124,31 @@ func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +236,7 @@ func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, er } // Do runs the request through the transport, handle the response and returns a updatesettings.Response -func (r UpdateSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +247,46 @@ func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the UpdateSettings query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the UpdateSettings headers map. 
@@ -229,3 +295,91 @@ func (r *UpdateSettings) Header(key, value string) *UpdateSettings { return r } + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *UpdateSettings) Timeout(duration string) *UpdateSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateSettings) Human(human bool) *UpdateSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: index.auto_expand_replicas +func (r *UpdateSettings) IndexAutoExpandReplicas(indexautoexpandreplicas string) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexAutoExpandReplicas = &indexautoexpandreplicas + + return r +} + +// API name: index.number_of_replicas +func (r *UpdateSettings) IndexNumberOfReplicas(indexnumberofreplicas int) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexNumberOfReplicas = &indexnumberofreplicas + + return r +} diff --git a/typedapi/xpack/info/info.go b/typedapi/xpack/info/info.go index 1439bbff1c..feb6f4b546 100644 --- a/typedapi/xpack/info/info.go +++ b/typedapi/xpack/info/info.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// Provides general information about the installed X-Pack features. +// Get information. +// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. package info import ( @@ -69,9 +75,15 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Provides general information about the installed X-Pack features. +// Get information. 
+// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, @@ -276,7 +288,8 @@ func (r *Info) Header(key, value string) *Info { } // Categories A comma-separated list of the information categories to include in the -// response. For example, `build,license,features`. +// response. +// For example, `build,license,features`. // API name: categories func (r *Info) Categories(categories ...xpackcategory.XPackCategory) *Info { tmp := []string{} @@ -297,7 +310,8 @@ func (r *Info) AcceptEnterprise(acceptenterprise bool) *Info { } // Human Defines whether additional human-readable information is included in the -// response. In particular, it adds descriptions and a tag line. +// response. +// In particular, it adds descriptions and a tag line. // API name: human func (r *Info) Human(human bool) *Info { r.values.Set("human", strconv.FormatBool(human)) diff --git a/typedapi/xpack/info/response.go b/typedapi/xpack/info/response.go index b582881c35..841eb330ad 100644 --- a/typedapi/xpack/info/response.go +++ b/typedapi/xpack/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/info/XPackInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/info/XPackInfoResponse.ts#L22-L29 type Response struct { Build types.BuildInformation `json:"build"` Features types.XpackFeatures `json:"features"` diff --git a/typedapi/xpack/usage/response.go b/typedapi/xpack/usage/response.go index bedb0fedb8..ff86f04d60 100644 --- a/typedapi/xpack/usage/response.go +++ b/typedapi/xpack/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b package usage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827/specification/xpack/usage/XPackUsageResponse.ts#L43-L79 +// https://github.com/elastic/elasticsearch-specification/blob/c75a0abec670d027d13eb8d6f23374f86621c76b/specification/xpack/usage/XPackUsageResponse.ts#L42-L77 type Response struct { AggregateMetric types.Base `json:"aggregate_metric"` Analytics types.Analytics `json:"analytics"` @@ -39,7 +39,6 @@ type Response struct { Enrich *types.Base `json:"enrich,omitempty"` Eql types.Eql `json:"eql"` Flattened *types.Flattened `json:"flattened,omitempty"` - FrozenIndices types.FrozenIndices `json:"frozen_indices"` Graph types.Base `json:"graph"` HealthApi *types.HealthStatistics `json:"health_api,omitempty"` Ilm types.Ilm `json:"ilm"` diff --git a/typedapi/xpack/usage/usage.go b/typedapi/xpack/usage/usage.go index 1406573c27..6bff2a3a9a 100644 --- a/typedapi/xpack/usage/usage.go +++ b/typedapi/xpack/usage/usage.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/48e2d9de9de2911b8cb1cf715e4bc0a2b1f4b827 +// https://github.com/elastic/elasticsearch-specification/tree/c75a0abec670d027d13eb8d6f23374f86621c76b -// This API provides information about which features are currently enabled and -// available under the current license and some usage statistics. +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. +// The API also provides some usage statistics. 
package usage import ( @@ -69,10 +71,12 @@ func NewUsageFunc(tp elastictransport.Interface) NewUsage { } } -// This API provides information about which features are currently enabled and -// available under the current license and some usage statistics. +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. +// The API also provides some usage statistics. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack func New(tp elastictransport.Interface) *Usage { r := &Usage{ transport: tp, @@ -278,8 +282,10 @@ func (r *Usage) Header(key, value string) *Usage { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Usage) MasterTimeout(duration string) *Usage { r.values.Set("master_timeout", duration)